makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/recover_optimizers.py
import copy
from hsr_tamp.pddlstream.algorithms.common import INIT_EVALUATION
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders, get_stream_plan_components
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_external_plan
from hsr_tamp.pddlstream.language.constants import get_prefix, is_plan, get_args
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.optimizer import ComponentStream, OptimizerStream
from hsr_tamp.pddlstream.utils import neighbors_from_orders, get_mapping, safe_apply_mapping
CLUSTER = True
def get_optimizer(result):
return result.external.optimizer if isinstance(result.external, ComponentStream) else None
##################################################
def combine_optimizer_plan(stream_plan, functions):
if not stream_plan:
return stream_plan
optimizer = get_optimizer(stream_plan[-1])
if optimizer is None:
return stream_plan
function_plan = list(filter(lambda r: get_prefix(r.instance.external.head)
in optimizer.objectives, functions))
external_plan = stream_plan + function_plan
cluster_plans = get_stream_plan_components(external_plan) if CLUSTER else [external_plan]
optimizer_plan = []
for cluster_plan in cluster_plans:
if all(isinstance(r, FunctionResult) for r in cluster_plan):
continue
#if len(cluster_plan) == 1:
# optimizer_plan.append(cluster_plan[0])
# continue
stream = OptimizerStream(optimizer, cluster_plan)
instance = stream.get_instance(stream.input_objects, fluent_facts=stream.fluent_facts)
result = instance.get_result(stream.output_objects)
optimizer_plan.append(result)
return optimizer_plan
def combine_optimizers(evaluations, external_plan):
if not is_plan(external_plan):
return external_plan
stream_plan, function_plan = partition_external_plan(external_plan)
optimizers = {get_optimizer(r) for r in stream_plan} # None is like a unique optimizer
if len(optimizers - {None}) == 0:
return external_plan
print('Constraint plan: {}'.format(external_plan))
combined_results = []
for optimizer in optimizers:
relevant_results = [r for r in stream_plan if get_optimizer(r) == optimizer]
combined_results.extend(combine_optimizer_plan(relevant_results, function_plan))
combined_results.extend(function_plan)
current_facts = set()
for result in combined_results:
current_facts.update(filter(lambda f: evaluation_from_fact(f) in evaluations, result.get_domain()))
combined_plan = []
while combined_results:
for result in combined_results:
if set(result.get_domain()) <= current_facts:
combined_plan.append(result)
current_facts.update(result.get_certified())
combined_results.remove(result)
break
else: # TODO: can also just try one cluster and return
raise RuntimeError()
#return None
return combined_plan
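# A minimal standalone sketch (illustrative only, not used by the library) of the
# fact-driven greedy ordering performed by the while-loop above: repeatedly emit
# any result whose domain facts are already available, then make its certified
# facts available. Results here are hypothetical (domain, certified) tuples of
# fact tuples rather than stream Result objects.
def _example_order_by_facts(results, initial_facts):
    available = set(initial_facts)
    ordered, remaining = [], list(results)
    while remaining:
        for result in remaining:
            domain, certified = result
            if set(domain) <= available:  # All preconditions are satisfied
                ordered.append(result)
                available.update(certified)
                remaining.remove(result)
                break
        else:
            raise RuntimeError('No result is supported by the available facts')
    return ordered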
##################################################
def retrace_instantiation(fact, streams, evaluations, free_parameters, visited_facts, planned_results):
# Makes two assumptions:
# 1) Each stream achieves a "primary" fact that uses all of its inputs + outputs
# 2) Outputs are only free parameters (no constants)
if (evaluation_from_fact(fact) in evaluations) or (fact in visited_facts):
return
visited_facts.add(fact)
for stream in streams:
for cert in stream.certified:
if get_prefix(fact) == get_prefix(cert):
mapping = get_mapping(get_args(cert), get_args(fact)) # Should be same anyways
if not all(p in mapping for p in (stream.inputs + stream.outputs)):
# TODO: assumes another effect is sufficient for binding
# Create arbitrary objects for inputs/outputs that aren't mentioned
# Can lead to incorrect ordering
continue
input_objects = safe_apply_mapping(stream.inputs, mapping)
output_objects = safe_apply_mapping(stream.outputs, mapping)
if not all(out in free_parameters for out in output_objects):
# Can only bind if free
continue
instance = stream.get_instance(input_objects)
for new_fact in instance.get_domain():
retrace_instantiation(new_fact, streams, evaluations, free_parameters,
visited_facts, planned_results)
planned_results.append(instance.get_result(output_objects))
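# Illustration (hypothetical stream): to achieve a goal fact ('Pose', b, p),
# retrace_instantiation matches it against a stream certifying ('Pose', ?b, ?p),
# binds ?b -> b and ?p -> p, recurses on the stream's domain facts first, and
# only then appends this instance's result, so planned_results lists upstream
# results before the results that consume their outputs.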
def replan_with_optimizers(evaluations, external_plan, domain, optimizers):
# TODO: return multiple plans?
# TODO: can instead have multiple goal binding combinations
# TODO: can replan using samplers as well
if not is_plan(external_plan):
return None
optimizers = list(filter(lambda s: isinstance(s, ComponentStream), optimizers))
if not optimizers:
return None
stream_plan, function_plan = partition_external_plan(external_plan)
free_parameters = {o for r in stream_plan for o in r.output_objects}
#free_parameters = {o for r in stream_plan for o in r.output_objects if isinstance(o, OptimisticObject)}
initial_evaluations = {e: n for e, n in evaluations.items() if n.result == INIT_EVALUATION}
#initial_evaluations = evaluations
goal_facts = set()
for result in stream_plan:
goal_facts.update(filter(lambda f: evaluation_from_fact(f) not in
initial_evaluations, result.get_certified()))
visited_facts = set()
new_results = []
for fact in goal_facts:
retrace_instantiation(fact, optimizers, initial_evaluations, free_parameters, visited_facts, new_results)
# TODO: ensure correct ordering
new_results = list(filter(lambda r: isinstance(r, ComponentStream), new_results))
#from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
#node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
#extract_stream_plan(node_from_atom, target_facts, stream_plan)
optimizer_results = []
    for optimizer in {get_optimizer(r) for r in new_results}: # None is like a unique optimizer
relevant_results = [r for r in new_results if get_optimizer(r) == optimizer]
optimizer_results.extend(combine_optimizer_plan(relevant_results, function_plan))
#print(str_from_object(set(map(fact_from_evaluation, evaluations))))
#print(str_from_object(set(goal_facts)))
# TODO: can do the flexibly sized optimizers search
from hsr_tamp.pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
optimizer_plan = reschedule_stream_plan(initial_evaluations, goal_facts, copy.copy(domain),
(stream_plan + optimizer_results), unique_binding=True)
if not is_plan(optimizer_plan):
return None
return optimizer_plan + function_plan
##################################################
def combine_optimizers_greedy(evaluations, external_plan):
if not is_plan(external_plan):
return external_plan
    # The key point is that a variable must be grounded before it can be used in a non-stream context
# TODO: construct variables in order
# TODO: graph cut algorithm to minimize the number of constraints that are excluded
# TODO: reorder to ensure that constraints are done first since they are likely to fail as tests
incoming_edges, outgoing_edges = neighbors_from_orders(get_partial_orders(external_plan))
queue = []
functions = []
for v in external_plan:
if not incoming_edges[v]:
(functions if isinstance(v, FunctionResult) else queue).append(v)
current = []
ordering = []
while queue:
optimizer = get_optimizer(current[-1]) if current else None
for v in queue:
if optimizer == get_optimizer(v):
current.append(v)
break
else:
ordering.extend(combine_optimizer_plan(current, functions))
current = [queue[0]]
v1 = current[-1]
queue.remove(v1)
for v2 in outgoing_edges[v1]:
incoming_edges[v2].remove(v1)
if not incoming_edges[v2]:
(functions if isinstance(v2, FunctionResult) else queue).append(v2)
ordering.extend(combine_optimizer_plan(current, functions))
return ordering + functions
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/incremental.py
from collections import Counter
import time
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.common import add_facts, add_certified, SolutionStore, UNKNOWN_EVALUATION
from hsr_tamp.pddlstream.algorithms.constraints import PlanConstraints
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem
from hsr_tamp.pddlstream.algorithms.instantiate_task import sas_from_pddl, instantiate_task
from hsr_tamp.pddlstream.algorithms.instantiation import Instantiator
from hsr_tamp.pddlstream.algorithms.search import abstrips_solve_from_task
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl_plan
from hsr_tamp.pddlstream.language.attachments import has_attachments, compile_fluents_as_attachments, solve_pyplanners
from hsr_tamp.pddlstream.language.statistics import load_stream_statistics, write_stream_statistics
from hsr_tamp.pddlstream.language.temporal import solve_tfd, SimplifiedDomain
from hsr_tamp.pddlstream.language.write_pddl import get_problem_pddl
from hsr_tamp.pddlstream.utils import INF, Verbose, str_from_object, elapsed_time
UPDATE_STATISTICS = False
def solve_temporal(evaluations, goal_exp, domain, debug=False, **kwargs):
assert isinstance(domain, SimplifiedDomain)
problem = get_problem_pddl(evaluations, goal_exp, domain.pddl)
return solve_tfd(domain.pddl, problem, debug=debug)
def solve_sequential(evaluations, goal_exp, domain, unit_costs=False, debug=False, **search_args):
problem = get_problem(evaluations, goal_exp, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
if has_attachments(domain):
with Verbose(debug):
instantiated = instantiate_task(task)
return solve_pyplanners(instantiated, **search_args)
sas_task = sas_from_pddl(task, debug=debug)
return abstrips_solve_from_task(sas_task, debug=debug, **search_args)
def solve_finite(evaluations, goal_exp, domain, **kwargs):
if isinstance(domain, SimplifiedDomain):
pddl_plan, cost = solve_temporal(evaluations, goal_exp, domain, **kwargs)
else:
pddl_plan, cost = solve_sequential(evaluations, goal_exp, domain, **kwargs)
plan = obj_from_pddl_plan(pddl_plan)
return plan, cost
##################################################
def process_instance(instantiator, store, instance, verbose=False): #, **complexity_args):
if instance.enumerated:
return []
start_time = time.time()
new_results, new_facts = instance.next_results(verbose=verbose)
store.sample_time += elapsed_time(start_time)
evaluations = store.evaluations
#remove_blocked(evaluations, instance, new_results)
for result in new_results:
complexity = result.compute_complexity(evaluations)
#complexity = instantiator.compute_complexity(instance)
for evaluation in add_certified(evaluations, result):
instantiator.add_atom(evaluation, complexity)
fact_complexity = 0 # TODO: record the instance or treat as initial?
for evaluation in add_facts(evaluations, new_facts, result=UNKNOWN_EVALUATION, complexity=fact_complexity):
instantiator.add_atom(evaluation, fact_complexity)
if not instance.enumerated:
instantiator.push_instance(instance)
return new_results
def process_stream_queue(instantiator, store, complexity_limit=INF, verbose=False):
instances = []
results = []
num_successes = 0
while not store.is_terminated() and instantiator and (instantiator.min_complexity() <= complexity_limit):
instance = instantiator.pop_stream()
if instance.enumerated:
continue
instances.append(instance)
new_results = process_instance(instantiator, store, instance, verbose=verbose)
results.extend(new_results)
num_successes += bool(new_results) # TODO: max_results?
if verbose:
print('Eager Calls: {} | Successes: {} | Results: {} | Counts: {}'.format(
len(instances), num_successes, len(results),
str_from_object(Counter(instance.external.name for instance in instances))))
return len(instances)
# def retrace_stream_plan(store, domain, goal_expression):
# # TODO: retrace the stream plan that supports the plan to find the certificate
# if store.best_plan is None:
# return None
# assert not domain.axioms
# from hsr_tamp.pddlstream.algorithms.downward import plan_preimage
# print(goal_expression)
# plan_preimage(store.best_plan, goal_expression)
# raise NotImplementedError()
##################################################
def solve_incremental(problem, constraints=PlanConstraints(),
unit_costs=False, success_cost=INF,
max_iterations=INF, max_time=INF, max_memory=INF,
initial_complexity=0, complexity_step=1, max_complexity=INF,
verbose=False, **search_kwargs):
"""
Solves a PDDLStream problem by alternating between applying all possible streams and searching
:param problem: a PDDLStream problem
:param constraints: PlanConstraints on the set of legal solutions
:param unit_costs: use unit action costs rather than numeric costs
:param success_cost: the exclusive (strict) upper bound on plan cost to successfully terminate
:param max_time: the maximum runtime
:param max_iterations: the maximum number of search iterations
:param max_memory: the maximum amount of memory
:param initial_complexity: the initial stream complexity limit
:param complexity_step: the increase in the stream complexity limit per iteration
:param max_complexity: the maximum stream complexity limit
:param verbose: if True, print the result of each stream application
:param search_kwargs: keyword args for the search subroutine
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan (INF if no plan), and evaluations is init expanded
using stream applications
"""
# max_complexity = 0 => current
# complexity_step = INF => exhaustive
# success_cost = terminate_cost = decision_cost
# TODO: warning if optimizers are present
evaluations, goal_expression, domain, externals = parse_problem(
problem, constraints=constraints, unit_costs=unit_costs)
store = SolutionStore(evaluations, max_time, success_cost, verbose, max_memory=max_memory) # TODO: include other info here?
if UPDATE_STATISTICS:
load_stream_statistics(externals)
static_externals = compile_fluents_as_attachments(domain, externals)
num_iterations = num_calls = 0
complexity_limit = initial_complexity
instantiator = Instantiator(static_externals, evaluations)
num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
while not store.is_terminated() and (num_iterations < max_iterations) and (complexity_limit <= max_complexity):
num_iterations += 1
print('Iteration: {} | Complexity: {} | Calls: {} | Evaluations: {} | Solved: {} | Cost: {:.3f} | '
'Search Time: {:.3f} | Sample Time: {:.3f} | Time: {:.3f}'.format(
num_iterations, complexity_limit, num_calls, len(evaluations),
store.has_solution(), store.best_cost, store.search_time, store.sample_time, store.elapsed_time()))
plan, cost = solve_finite(evaluations, goal_expression, domain,
max_cost=min(store.best_cost, constraints.max_cost), **search_kwargs)
if is_plan(plan):
store.add_plan(plan, cost)
if not instantiator:
break
if complexity_step is None:
# TODO: option to select the next k-smallest complexities
complexity_limit = instantiator.min_complexity()
else:
complexity_limit += complexity_step
num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
#retrace_stream_plan(store, domain, goal_expression)
#print('Final queue size: {}'.format(len(instantiator)))
summary = store.export_summary()
summary.update({
'iterations': num_iterations,
'complexity': complexity_limit,
})
print('Summary: {}'.format(str_from_object(summary, ndigits=3))) # TODO: return the summary
if UPDATE_STATISTICS:
write_stream_statistics(externals, verbose)
return store.extract_solution()
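# Hedged usage sketch for solve_incremental (the problem construction below is
# hypothetical; any PDDLProblem tuple built elsewhere works the same way):
# from hsr_tamp.pddlstream.language.constants import PDDLProblem
# problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
# plan, cost, evaluations = solve_incremental(problem, unit_costs=True, max_time=60)
# if plan is not None:
#     for index, action in enumerate(plan):
#         print(index, action)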
##################################################
def solve_immediate(problem, **kwargs):
"""
Solves a PDDLStream problem by searching only
    INCOMPLETENESS WARNING: only use if no stream evaluations are necessary (otherwise terminates early)
:param problem: a PDDLStream problem
:param kwargs: keyword args for solve_incremental
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
    return solve_incremental(problem, initial_complexity=0, complexity_step=0, max_complexity=0, **kwargs)
def solve_exhaustive(problem, **kwargs):
"""
Solves a PDDLStream problem by applying all possible streams and searching once
    INCOMPLETENESS WARNING: only use if the set of instantiable stream instances is finite (otherwise infinite loop)
:param problem: a PDDLStream problem
:param kwargs: keyword args for solve_incremental
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan, and evaluations is init but expanded
using stream applications
"""
    return solve_incremental(problem, initial_complexity=INF, complexity_step=INF, max_complexity=INF, **kwargs)
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/skeleton.py
from __future__ import print_function
import time
from collections import namedtuple, Sized
from itertools import count
from heapq import heappush, heappop
from hsr_tamp.pddlstream.algorithms.common import is_instance_ready, compute_complexity, stream_plan_complexity, add_certified, \
stream_plan_preimage, COMPLEXITY_OP
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.algorithms.disabled import process_instance, update_bindings, update_cost, bind_action_plan
from hsr_tamp.pddlstream.algorithms.reorder import get_output_objects, get_object_orders, get_partial_orders, get_initial_orders
from hsr_tamp.pddlstream.language.constants import is_plan, INFEASIBLE, FAILED, SUCCEEDED
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.algorithms.visualization import visualize_stream_orders
from hsr_tamp.pddlstream.utils import elapsed_time, HeapElement, apply_mapping, INF, get_mapping, adjacent_from_edges, \
incoming_from_edges, outgoing_from_edges
# TODO: the bias away from solved things is actually due to USE_PRIORITIES+timed_process not REQUIRE_DOWNSTREAM
USE_PRIORITIES = True
GREEDY_VISITS = 0
GREEDY_BEST = True
REQUIRE_DOWNSTREAM = True
Priority = namedtuple('Priority', ['not_greedy', 'complexity', 'visits', 'remaining', 'cost']) # TODO: FIFO
Affected = namedtuple('Affected', ['indices', 'has_cost'])
def compute_affected_downstream(stream_plan, index):
# TODO: if the cost is pruned, then add everything that contributes, not just the last function
affected_indices = [index]
result = stream_plan[index]
has_cost = (type(result) is FunctionResult)
output_objects = set(get_output_objects(result))
if not output_objects: # TODO: should I do conditions instead?
return Affected(affected_indices, has_cost)
for index2 in range(index + 1, len(stream_plan)):
result2 = stream_plan[index2]
if output_objects & result2.instance.get_all_input_objects(): # TODO: get_object_orders
output_objects.update(get_output_objects(result2)) # TODO: just include directly affected?
affected_indices.append(index2)
has_cost |= (type(result2) is FunctionResult)
return Affected(affected_indices, has_cost)
def compute_affected_component(stream_plan, index):
# TODO: affected upstream
raise NotImplementedError()
##################################################
class Skeleton(object):
def __init__(self, queue, stream_plan, action_plan, cost):
# TODO: estimate statistics per stream_instance online and use to reorder the skeleton
self.queue = queue
self.index = len(self.queue.skeletons)
self.stream_plan = stream_plan
self.action_plan = action_plan
self.cost = cost
self.best_binding = None
self.improved = False
self.root = Binding(self, self.cost, history=[], mapping={}, index=0, parent=None, parent_result=None)
self.affected_indices = [compute_affected_downstream(self.stream_plan, index)
for index in range(len(self.stream_plan))]
stream_orders = get_partial_orders(self.stream_plan) # init_facts=self.queue.evaluations)
index_from_result = get_mapping(stream_plan, range(len(stream_plan)))
index_orders = {(index_from_result[r1], index_from_result[r2]) for r1, r2 in stream_orders}
preimage = stream_plan_preimage(stream_plan)
self.preimage_complexities = [[queue.evaluations[evaluation_from_fact(fact)].complexity
for fact in stream.get_domain() if fact in preimage] for stream in stream_plan]
self.incoming_indices = incoming_from_edges(index_orders)
self.outgoing_indices = outgoing_from_edges(index_orders)
#min_complexity = stream_plan_complexity(self.queue.evaluations, self.stream_plan, [0]*len(stream_plan))
# TODO: compute this all at once via hashing
def compute_complexity(self, stream_calls, complexities=[]):
# TODO: use the previous value when possible
assert len(stream_calls) == len(self.stream_plan)
start_index = len(complexities)
complexities = complexities + [0]*(len(stream_calls) - start_index)
for index in range(start_index, len(self.stream_plan)):
complexities[index] = self.compute_index_complexity(index, stream_calls[index], complexities)
return complexities
def compute_index_complexity(self, index, num_calls, complexities):
# TODO: automatically set the opt level to be zero for any streams that are bound (assuming not reachieve)
domain_complexity = COMPLEXITY_OP([0] + self.preimage_complexities[index] +
[complexities[index2] for index2 in self.incoming_indices[index]])
return domain_complexity + self.stream_plan[index].external.get_complexity(num_calls=num_calls)
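    # Recurrence sketch for the value computed above (assuming the default
    # COMPLEXITY_OP of max): complexity(i) = COMPLEXITY_OP over the preimage-fact
    # complexities of stream i and complexity(j) for each incoming edge j -> i,
    # plus external_i.get_complexity(num_calls) - i.e. the deepest chain of
    # stream calls needed before stream i can fire.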
def update_best(self, binding):
if (self.best_binding is None) or (self.best_binding.index < binding.index) or \
((self.best_binding.index == binding.index) and (binding.cost < self.best_binding.cost)):
self.best_binding = binding
#print('Skeleton {} | Progress: {} | New best: {}'.format(
# self.index, self.best_binding.index, self.best_binding))
self.improved = True
return True
return False
def bind_stream_result(self, index, mapping):
return self.stream_plan[index].remap_inputs(mapping) # Has optimistic output objects
def bind_action_plan(self, mapping):
return bind_action_plan(self.action_plan, mapping)
def visualize_bindings(self):
# TODO: remap outputs
orders = {(binding1.parent_result, binding2.parent_result)
for binding1, binding2 in self.root.get_connections()}
return visualize_stream_orders(orders)
##################################################
class Binding(object):
counter = count()
def __init__(self, skeleton, cost, history, mapping, index, parent, parent_result):
#def __init__(self, skeleton, cost=0., history=[], mapping={}, index=0, parent=None):
self.skeleton = skeleton
self.cost = cost
self.history = history
self.mapping = mapping
self.index = index
self.parent = parent
if self.parent is not None:
self.parent.children.append(self)
self.parent_result = parent_result
self.children = []
self._result = False
self.visits = 0 # The number of times _process_binding has been called
self.calls = 0 # The index for result_history
self.complexity = None
self.complexities = None
self.max_history = max(self.history) if self.history else 0
self.skeleton.update_best(self)
self.num = next(self.counter) # TODO: FIFO
@property
def is_fully_bound(self):
return self.index == len(self.skeleton.stream_plan)
@property
def result(self):
if self._result is False:
self._result = None
if not self.is_fully_bound:
self._result = self.skeleton.bind_stream_result(self.index, self.mapping)
return self._result
def is_best(self):
return self.skeleton.best_binding is self
def is_dominated(self):
return self.skeleton.queue.store.best_cost <= self.cost
def is_enumerated(self):
return self.is_fully_bound or self.result.enumerated
def is_unsatisfied(self):
return not self.children
def is_greedy(self):
return (self.visits <= GREEDY_VISITS) and (not GREEDY_BEST or self.is_best())
def up_to_date(self):
if self.is_fully_bound:
return True
#if REQUIRE_DOWNSTREAM:
# return self.result.instance.num_calls <= self.visits
#else:
return self.calls == self.result.instance.num_calls
def compute_complexity(self):
if self.is_fully_bound:
return 0
# TODO: use last if self.result.external.get_complexity(num_calls=INF) == 0
# TODO: intelligently compute/cache this - store parent stream_plan_complexity or compute formula per skeleton
if self.complexity is None:
full_history = self.history + [self.calls] # TODO: relevant history, full history, or future
future = full_history + [0]*(len(self.skeleton.stream_plan) - len(full_history))
parent_complexities = [0]*len(self.skeleton.stream_plan) if self.index == 0 else self.parent.complexities
if self.skeleton.outgoing_indices[self.index]:
self.complexities = self.skeleton.compute_complexity(future, complexities=parent_complexities[:self.index])
else:
self.complexities = list(parent_complexities)
self.complexities[self.index] = self.skeleton.compute_index_complexity(self.index, self.calls, self.complexities)
self.complexity = COMPLEXITY_OP(self.complexities)
#self.complexity = stream_plan_complexity(self.skeleton.queue.evaluations, self.skeleton.stream_plan, future)
return self.complexity
#return compute_complexity(self.skeleton.queue.evaluations, self.result.get_domain()) + \
# self.result.external.get_complexity(self.visits) # visits, calls
def check_complexity(self, complexity_limit=INF):
if complexity_limit == INF:
return True
if any(calls > complexity_limit for calls in [self.max_history, self.calls]): # + self.history
# Check lower bounds for efficiency purposes
return False
return self.compute_complexity() <= complexity_limit
def check_downstream_helper(self, affected):
if self.is_dominated():
# Keep exploring down branches that contain a cost term
return affected.has_cost
if self.is_unsatisfied(): # or type(self.result) == FunctionResult): # not self.visits
return self.index in affected.indices
        # TODO: only prune functions here if the rest of the plan is feasible
#if not affected.indices or (max(affected.indices) < self.index):
# # Cut branch for efficiency purposes
# return False
# TODO: discard bindings that have been pruned by their cost per affected component
# TODO: both any and all weakly prune
return any(binding.check_downstream_helper(affected) for binding in self.children)
def check_downstream(self):
return self.check_downstream_helper(self.skeleton.affected_indices[self.index])
def get_priority(self):
if not USE_PRIORITIES:
return Priority(not_greedy=True, complexity=0, visits=self.visits, remaining=0, cost=0.)
# TODO: use effort instead
# TODO: instead of remaining, use the index in the queue to reprocess earlier ones
#priority = self.visits
#priority = self.compute_complexity()
priority = self.compute_complexity() + (self.visits - self.calls) # TODO: check this
# TODO: call_index
remaining = len(self.skeleton.stream_plan) - self.index
return Priority(not self.is_greedy(), priority, self.visits, remaining, self.cost)
def post_order(self):
for child in self.children:
for binding in child.post_order():
yield binding
yield self
def get_ancestors(self):
if self.parent is not None:
for ancestor in self.parent.get_ancestors():
yield ancestor
yield self
def get_connections(self):
# TODO: easier to just iterate over all bindings and extract the parent
connections = []
for child in self.children:
connections.append((self, child))
connections.extend(child.get_connections())
return connections
def recover_bound_results(self):
return [binding.parent_result for binding in list(self.get_ancestors())[1:]]
def update_bindings(self):
new_bindings = []
instance = self.result.instance
for call_idx in range(self.calls, instance.num_calls):
for new_result in instance.results_history[call_idx]: # TODO: don't readd if successful already
if new_result.is_successful():
new_bindings.append(Binding(
skeleton=self.skeleton,
cost=update_cost(self.cost, self.result, new_result),
history=self.history + [call_idx],
mapping=update_bindings(self.mapping, self.result, new_result),
index=self.index + 1, # TODO: history instead of results_history
parent=self,
parent_result=new_result))
self.calls = instance.num_calls
self.visits = max(self.visits, self.calls)
self.complexity = None # Forces re-computation
#self.skeleton.visualize_bindings()
return new_bindings
def __repr__(self):
return '{}(skeleton={}, {})'.format(self.__class__.__name__, self.skeleton.index, self.result)
##################################################
STANDBY = None
class SkeletonQueue(Sized):
def __init__(self, store, domain, disable=True):
# TODO: multi-threaded
self.store = store
self.domain = domain
self.skeletons = []
self.queue = [] # TODO: deque version
self.disable = disable
self.standby = []
@property
def evaluations(self):
return self.store.evaluations
def __len__(self):
return len(self.queue)
def is_active(self):
return self.queue and (not self.store.is_terminated())
def push_binding(self, binding):
# TODO: add to standby if not active
priority = binding.get_priority()
element = HeapElement(priority, binding)
heappush(self.queue, element)
def pop_binding(self):
priority, binding = heappop(self.queue)
#return binding
return priority, binding
def peak_binding(self):
if not self.queue:
return None
priority, binding = self.queue[0]
return priority, binding
def new_skeleton(self, stream_plan, action_plan, cost):
skeleton = Skeleton(self, stream_plan, action_plan, cost)
self.skeletons.append(skeleton)
self.push_binding(skeleton.root)
#self.greedily_process()
return skeleton
def readd_standby(self):
for binding in self.standby:
self.push_binding(binding)
self.standby = []
#########################
def _process_binding(self, binding):
assert binding.calls <= binding.visits # TODO: global DEBUG mode
readd = is_new = False
if binding.is_dominated():
return readd, is_new
if binding.is_fully_bound:
action_plan = binding.skeleton.bind_action_plan(binding.mapping)
self.store.add_plan(action_plan, binding.cost)
is_new = True
return readd, is_new
binding.visits += 1
instance = binding.result.instance
if (REQUIRE_DOWNSTREAM and not binding.check_downstream()): # TODO: move check_complexity here
# TODO: causes redundant plan skeletons to be identified (along with complexity using visits instead of calls)
# Do I need to re-enable this stream in case another skeleton needs it?
# TODO: should I perform this when deciding to sample something new instead?
return STANDBY, is_new
#if not is_instance_ready(self.evaluations, instance):
# raise RuntimeError(instance)
if binding.up_to_date():
new_results, _ = process_instance(self.store, self.domain, instance, disable=self.disable)
is_new = bool(new_results)
for new_binding in binding.update_bindings():
self.push_binding(new_binding)
readd = not instance.enumerated
return readd, is_new
#########################
def process_root(self):
_, binding = self.pop_binding()
readd, is_new = self._process_binding(binding)
if readd is not False:
self.push_binding(binding)
# TODO: if readd == STANDBY
return is_new
def greedily_process(self):
num_new = 0
while self.is_active():
priority, binding = self.peak_binding()
if not binding.is_greedy(): #priority.not_greedy:
break
num_new += self.process_root()
return num_new
def process_until_new(self, print_frequency=1.):
# TODO: process the entire queue for one pass instead
num_new = 0
if not self.is_active():
return num_new
print('Sampling until new output values')
iterations = 0
last_time = time.time()
while self.is_active() and (not num_new):
iterations += 1
_, binding = self.pop_binding()
readd, is_new = self._process_binding(binding)
if readd is True:
self.push_binding(binding)
elif readd is STANDBY:
self.standby.append(binding) # TODO: test for deciding whether to standby
num_new += is_new
if print_frequency <= elapsed_time(last_time):
print('Queue: {} | Iterations: {} | Time: {:.3f}'.format(
len(self.queue), iterations, elapsed_time(last_time)))
last_time = time.time()
self.readd_standby()
return num_new + self.greedily_process()
def process_complexity(self, complexity_limit):
# TODO: could copy the queue and filter instances that exceed complexity_limit
num_new = 0
if not self.is_active():
return num_new
print('Sampling while complexity <= {}'.format(complexity_limit))
while self.is_active():
_, binding = self.pop_binding()
if binding.check_complexity(complexity_limit): # not binding.up_to_date() or
readd, is_new = self._process_binding(binding)
num_new += is_new
if readd is not STANDBY:
if readd is True:
self.push_binding(binding)
continue
self.standby.append(binding)
self.readd_standby()
return num_new + self.greedily_process()
# TODO: increment the complexity level even more if nothing below in the queue
def timed_process(self, max_time=INF, max_iterations=INF):
# TODO: combine process methods into process_until
iterations = num_new = 0
if not self.is_active():
return num_new
print('Sampling for up to {:.3f} seconds'.format(max_time)) #, max_iterations))
start_time = time.time() # TODO: instead use sample_time
while self.is_active() and (elapsed_time(start_time) < max_time) and (iterations < max_iterations):
iterations += 1
num_new += self.process_root()
#print('Iterations: {} | New: {} | Time: {:.3f}'.format(iterations, num_new, elapsed_time(start_time)))
return num_new + self.greedily_process()
#########################
def accelerate_best_bindings(self, **kwargs):
# TODO: more generally reason about streams on several skeletons
# TODO: reset the complexity values for old streams
for skeleton in self.skeletons:
if not skeleton.improved:
continue
skeleton.improved = False
for result in skeleton.best_binding.recover_bound_results():
# TODO: just accelerate the facts within the plan preimage
#print(result, result.compute_complexity(self.evaluations, **kwargs))
result.call_index = 0 # Pretends the fact was first
#print(result.compute_complexity(self.evaluations, **kwargs))
add_certified(self.evaluations, result, **kwargs) # TODO: should special have a complexity of INF?
# TODO: AssertionError: Could not find instantiation for numeric expression: dist
def process(self, stream_plan, action_plan, cost, complexity_limit, max_time=0, accelerate=False):
start_time = time.time()
if is_plan(stream_plan):
self.new_skeleton(stream_plan, action_plan, cost)
self.greedily_process()
elif (stream_plan is INFEASIBLE) and not self.process_until_new():
# Move this after process_complexity
return INFEASIBLE
if not self.queue:
return FAILED
# TODO: add and process
self.timed_process(max_time=(max_time - elapsed_time(start_time)))
self.process_complexity(complexity_limit)
if accelerate:
self.accelerate_best_bindings()
return FAILED
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/satisfaction.py
from __future__ import print_function
import time
from collections import Counter, namedtuple
from hsr_tamp.pddlstream.algorithms.algorithm import parse_stream_pddl, evaluations_from_init
from hsr_tamp.pddlstream.algorithms.common import SolutionStore
from hsr_tamp.pddlstream.algorithms.disable_skeleton import create_disabled_axioms, extract_disabled_clusters
from hsr_tamp.pddlstream.algorithms.downward import make_domain, make_predicate, add_predicate
from hsr_tamp.pddlstream.algorithms.recover_optimizers import retrace_instantiation, combine_optimizers
from hsr_tamp.pddlstream.algorithms.reorder import reorder_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
# from hsr_tamp.pddlstream.algorithms.skeleton import SkeletonQueue
from hsr_tamp.pddlstream.algorithms.skeleton import SkeletonQueue
from hsr_tamp.pddlstream.language.constants import is_parameter, get_length, partition_facts, Assignment, OptPlan
from hsr_tamp.pddlstream.language.conversion import revert_solution, \
evaluation_from_fact, replace_expression, get_prefix, get_args
from hsr_tamp.pddlstream.language.function import Function
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.language.statistics import write_stream_statistics, compute_plan_effort
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.algorithms.visualization import visualize_constraints
from hsr_tamp.pddlstream.utils import INF, get_mapping, elapsed_time, str_from_object, safe_zip
# TODO: ConstraintProblem?
SatisfactionProblem = namedtuple('SatisfactionProblem', ['stream_pddl', 'stream_map', 'init', 'terms'])
SatisfactionSolution = namedtuple('SatisfactionSolution', ['bindings', 'cost', 'facts'])
##################################################
def parse_value(value):
return OptimisticObject.from_opt(value, value) if is_parameter(value) else Object.from_value(value)
def obj_from_existential_expression(parent): # obj_from_value_expression
return replace_expression(parent, parse_value)
def create_domain(goal_facts):
domain = make_domain()
for fact in goal_facts: # TODO: consider removing this annoying check
name = get_prefix(fact)
parameters = ['?x{}'.format(i) for i in range(len(get_args(fact)))]
add_predicate(domain, make_predicate(name, parameters))
return domain
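# Example: a goal fact such as ('on', b1, b2) yields a synthesized predicate
# (on ?x0 ?x1) in the generated domain, so every goal constraint is declared
# before planning (the object names here are hypothetical).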
def plan_functions(functions, externals):
external_from_function = {}
for external in filter(lambda e: isinstance(e, Function), externals):
assert external.function not in external_from_function
external_from_function[external.function] = external
function_plan = set()
for term in functions:
if get_prefix(term) not in external_from_function:
raise ValueError('{} is not implemented'.format(get_prefix(term)))
external = external_from_function[get_prefix(term)]
instance = external.get_instance(get_args(term))
[result] = instance.next_optimistic()
function_plan.add(result)
print('Function plan:', str_from_object(function_plan))
return function_plan
def get_parameters(goal_facts):
return {o for f in goal_facts for o in get_args(f) if isinstance(o, OptimisticObject)}
def extract_streams(evaluations, externals, goal_facts):
streams = list(filter(lambda e: isinstance(e, Stream), externals))
free_parameters = get_parameters(goal_facts)
visited_facts = set()
stream_results = []
for fact in goal_facts:
# TODO: prune results that already exceed effort limit
retrace_instantiation(fact, streams, evaluations, free_parameters, visited_facts, stream_results)
print('Streams:', stream_results)
# TODO: express some of this pruning using effort (e.g. unlikely to sample bound value)
return stream_results
def get_optimistic_cost(function_plan):
return sum([0.] + [result.value for result in function_plan
if type(result.external) == Function])
def bindings_from_plan(plan_skeleton, action_plan):
if action_plan is None:
return None
bindings = {}
for (args1,), (args2,) in safe_zip(plan_skeleton, action_plan):
parameter_names = [o.value for o in args1]
bindings.update(get_mapping(parameter_names, args2))
return bindings
def are_dominated(clusters1, clusters2):
    return all(any(c1 <= c2 for c2 in clusters2) for c1 in clusters1)
def dump_assignment(solution):
bindings, cost, evaluations = solution
print()
print('Solved: {}'.format(bindings is not None))
print('Cost: {:.3f}'.format(cost))
print('Total facts: {}'.format(len(evaluations)))
print('Fact counts: {}'.format(str_from_object(Counter(map(get_prefix, evaluations.all_facts))))) # preimage_facts
if bindings is None:
return
print('Assignments:')
for param in sorted(bindings):
print('{} = {}'.format(param, str_from_object(bindings[param])))
def visualize_problem(problem, **kwargs):
stream_pddl, stream_map, init, terms = problem
terms = set(map(obj_from_existential_expression, terms))
return visualize_constraints(terms, **kwargs)
##################################################
def constraint_satisfaction(problem, stream_info={},
costs=True, max_cost=INF, success_cost=INF, max_time=INF,
unit_efforts=False, max_effort=INF,
max_skeletons=INF, search_sample_ratio=1, verbose=True, **search_args):
# Approaches
# 1) Existential quantification of bindings in goal conditions
    # 2) Backtrack useful streams and then schedule. Create arbitrary outputs for parameters not mentioned.
# 3) Construct all useful streams and then associate outputs with bindings
    # A useful stream must satisfy at least one fact. How should these assignments be propagated though?
# Make an action that maps each stream result to unbound values?
# TODO: include functions again for cost-sensitive satisfaction
# TODO: convert init into streams to bind certain facts
# TODO: investigate constraint satisfaction techniques for binding instead
# TODO: could also instantiate all possible free parameters even if not useful
# TODO: effort that is a function of the number of output parameters (degrees of freedom)
# TODO: use a CSP solver instead of a planner internally
# TODO: max_iterations?
stream_pddl, stream_map, init, terms = problem
if not terms:
return SatisfactionSolution({}, 0, init)
constraints, negated, functions = partition_facts(set(map(obj_from_existential_expression, terms)))
if not costs:
functions = []
evaluations = evaluations_from_init(init)
goal_facts = set(filter(lambda f: evaluation_from_fact(f) not in evaluations, constraints))
free_parameters = sorted(get_parameters(goal_facts))
print('Parameters:', free_parameters)
externals = parse_stream_pddl(stream_pddl, stream_map, stream_info, unit_efforts=unit_efforts)
stream_results = extract_streams(evaluations, externals, goal_facts)
function_plan = plan_functions(negated + functions, externals)
plan_skeleton = [Assignment(free_parameters)]
cost = get_optimistic_cost(function_plan)
if max_cost < cost:
return SatisfactionSolution(None, INF, init)
# TODO: detect connected components
# TODO: eagerly evaluate fully bound constraints
# TODO: consider other results if this fails
domain = create_domain(goal_facts)
init_evaluations = evaluations.copy()
store = SolutionStore(evaluations, max_time=max_time, success_cost=success_cost, verbose=verbose)
queue = SkeletonQueue(store, domain, disable=False)
num_iterations = search_time = sample_time = 0
planner = 'ff-astar' # TODO: toggle within reschedule_stream_plan
#last_clusters = set()
#last_success = True
while not store.is_terminated():
num_iterations += 1
start_time = time.time()
print('\nIteration: {} | Skeletons: {} | Skeleton Queue: {} | Evaluations: {} | '
'Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
num_iterations, len(queue.skeletons), len(queue),
len(evaluations), store.best_cost, search_time, sample_time, store.elapsed_time()))
external_plan = None
if len(queue.skeletons) < max_skeletons:
domain.axioms[:] = create_disabled_axioms(queue, use_parameters=False)
            #dominated = are_dominated(last_clusters, clusters)
#last_clusters = clusters
#if last_success or not dominated: # Could also keep a history of results
stream_plan = reschedule_stream_plan(init_evaluations, goal_facts, domain, stream_results,
unique_binding=True, unsatisfiable=True,
max_effort=max_effort, planner=planner, **search_args)
if stream_plan is not None:
external_plan = reorder_stream_plan(store, combine_optimizers(
init_evaluations, stream_plan + list(function_plan)))
print('Stream plan ({}, {:.3f}): {}'.format(
get_length(external_plan), compute_plan_effort(external_plan), external_plan))
last_success = (external_plan is not None)
search_time += elapsed_time(start_time)
# Once a constraint added for a skeleton, it should only be relaxed
start_time = time.time()
if last_success: # Only works if create_disable_axioms never changes
allocated_sample_time = (search_sample_ratio * search_time) - sample_time
else:
allocated_sample_time = INF
queue.process(external_plan, OptPlan(plan_skeleton, []), cost=cost, # TODO: fill in preimage facts
complexity_limit=INF, max_time=allocated_sample_time)
sample_time += elapsed_time(start_time)
if not last_success and not queue:
break
# TODO: exhaustively compute all plan skeletons and add to queue within the focused algorithm
write_stream_statistics(externals, verbose)
action_plan, cost, facts = revert_solution(store.best_plan, store.best_cost, evaluations)
bindings = bindings_from_plan(plan_skeleton, action_plan)
return SatisfactionSolution(bindings, cost, facts)
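# Hedged usage sketch (stream definitions and terms are constructed elsewhere;
# the variables below are hypothetical):
# problem = SatisfactionProblem(stream_pddl, stream_map, init, terms)
# solution = constraint_satisfaction(problem, costs=False, max_time=30)
# dump_assignment(solution)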
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/algorithm.py
from collections import Counter
from hsr_tamp.pddlstream.algorithms.common import evaluations_from_init, SOLUTIONS
from hsr_tamp.pddlstream.algorithms.constraints import add_plan_constraints
from hsr_tamp.pddlstream.algorithms.downward import parse_lisp, parse_goal, has_costs, set_unit_costs, normalize_domain_goal
from hsr_tamp.pddlstream.language.temporal import parse_domain, SimplifiedDomain
from hsr_tamp.pddlstream.language.constants import get_prefix, get_args
from hsr_tamp.pddlstream.language.conversion import obj_from_value_expression
from hsr_tamp.pddlstream.language.exogenous import compile_to_exogenous
from hsr_tamp.pddlstream.language.external import External
from hsr_tamp.pddlstream.language.function import parse_function, parse_predicate
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.language.optimizer import parse_optimizer
from hsr_tamp.pddlstream.language.rule import parse_rule, apply_rules_to_streams, RULES
from hsr_tamp.pddlstream.language.stream import parse_stream, Stream, StreamInstance
from hsr_tamp.pddlstream.utils import INF
# TODO: rename file to parsing
def parse_constants(domain, constant_map):
obj_from_constant = {}
for constant in domain.constants:
if constant.name.startswith(Object._prefix): # TODO: check other prefixes
raise NotImplementedError('Constants are not currently allowed to begin with {}'.format(Object._prefix))
if constant.name not in constant_map:
raise ValueError('Undefined constant {}'.format(constant.name))
value = constant_map.get(constant.name, constant.name)
obj_from_constant[constant.name] = Object(value, name=constant.name) # TODO: remap names
# TODO: add object predicate
for name in constant_map:
for constant in domain.constants:
if constant.name == name:
break
else:
raise ValueError('Constant map value {} not mentioned in domain :constants'.format(name))
del domain.constants[:] # So not set twice
return obj_from_constant
def check_problem(domain, streams, obj_from_constant):
for action in (domain.actions + domain.axioms):
for p, c in Counter(action.parameters).items():
if c != 1:
raise ValueError('Parameter [{}] for action [{}] is not unique'.format(p.name, action.name))
# TODO: check that no undeclared parameters & constants
#action.dump()
undeclared_predicates = set()
for stream in streams:
# TODO: domain.functions
facts = list(stream.domain)
if isinstance(stream, Stream):
facts.extend(stream.certified)
for fact in facts:
name = get_prefix(fact)
if name not in domain.predicate_dict:
undeclared_predicates.add(name)
            elif len(get_args(fact)) != domain.predicate_dict[name].get_arity():
                print('Warning! Predicate used with the wrong arity in stream [{}]: {}'.format(stream.name, fact))
# for constant in stream.constants:
# if constant not in obj_from_constant:
# raise ValueError('Undefined constant in stream [{}]: {}'.format(stream.name, constant))
if undeclared_predicates:
print('Warning! Undeclared predicates: {}'.format(
sorted(undeclared_predicates))) # Undeclared predicate: {}
def reset_globals():
# TODO: maintain these dictionaries in an object
Object.reset()
OptimisticObject.reset()
RULES[:] = []
SOLUTIONS[:] = []
def parse_problem(problem, stream_info={}, constraints=None, unit_costs=False, unit_efforts=False):
# TODO: just return the problem if already written programmatically
#reset_globals() # Prevents use of satisfaction.py
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = problem
domain = parse_domain(domain_pddl) # TODO: normalize here
#domain = domain_pddl
if len(domain.types) != 1:
raise NotImplementedError('Types are not currently supported')
if unit_costs:
set_unit_costs(domain)
if not has_costs(domain):
# TODO: set effort_weight to 1 if no costs
        print('Warning! No action has a cost. Recommend setting unit_costs=True')
obj_from_constant = parse_constants(domain, constant_map) # Keep before parse_stream_pddl
streams = parse_stream_pddl(stream_pddl, stream_map, stream_info=stream_info,
unit_costs=unit_costs, unit_efforts=unit_efforts)
check_problem(domain, streams, obj_from_constant)
evaluations = evaluations_from_init(init)
goal_exp = obj_from_value_expression(goal)
if isinstance(domain, SimplifiedDomain):
#assert isinstance(domain, str) # raw PDDL is returned
_ = {name: Object(value, name=name) for name, value in constant_map.items()}
return evaluations, goal_exp, domain, streams
goal_exp = add_plan_constraints(constraints, domain, evaluations, goal_exp)
parse_goal(goal_exp, domain) # Just to check that it parses
normalize_domain_goal(domain, goal_exp) # TODO: does not normalize goal_exp
compile_to_exogenous(evaluations, domain, streams)
return evaluations, goal_exp, domain, streams
##################################################
def parse_streams(streams, rules, stream_pddl, procedure_map, procedure_info, use_functions=True):
stream_iter = iter(parse_lisp(stream_pddl))
assert('define' == next(stream_iter))
pddl_type, pddl_name = next(stream_iter)
assert('stream' == pddl_type)
for lisp_list in stream_iter:
name = lisp_list[0] # TODO: refactor at this point
if name == ':stream':
externals = [parse_stream(lisp_list, procedure_map, procedure_info)]
elif name == ':rule':
externals = [parse_rule(lisp_list, procedure_map, procedure_info)]
elif name == ':function':
if not use_functions:
continue
externals = [parse_function(lisp_list, procedure_map, procedure_info)]
elif name == ':predicate': # Cannot just use args if want a bound
externals = [parse_predicate(lisp_list, procedure_map, procedure_info)]
elif name == ':optimizer':
externals = parse_optimizer(lisp_list, procedure_map, procedure_info)
else:
raise ValueError(name)
for external in externals:
if any(e.name == external.name for e in streams):
raise ValueError('Stream [{}] is not unique'.format(external.name))
if name == ':rule':
rules.append(external)
external.pddl_name = pddl_name # TODO: move within constructors
streams.append(external)
def set_unit_efforts(externals):
for external in externals:
if external.get_effort() < INF:
external.info.effort = 1
NO_INFO = None
RELATIONAL_INFO = 'relational_info' # structural_info
STATISTICS_INFO = 'statistics_info'
def parse_stream_pddl(stream_pddl, stream_map, stream_info={}, unit_costs=False, unit_efforts=False):
if stream_info is None: # NO_INFO
stream_info = {}
externals = []
if stream_pddl is None:
return externals # No streams
if isinstance(stream_pddl, str):
stream_pddl = [stream_pddl]
if all(isinstance(e, External) for e in stream_pddl):
return stream_pddl
if isinstance(stream_map, dict): # DEBUG_MODES
stream_map = {k.lower(): v for k, v in stream_map.items()}
stream_info = {k.lower(): v for k, v in stream_info.items()}
rules = []
for pddl in stream_pddl:
# TODO: check which functions are actually used and prune the rest
parse_streams(externals, rules, pddl, stream_map, stream_info, use_functions=not unit_costs)
apply_rules_to_streams(rules, externals)
if unit_efforts:
set_unit_efforts(externals)
return externals
##################################################
def remove_blocked(evaluations, domain, instance, new_results):
# TODO: finish refactoring this
if new_results and isinstance(instance, StreamInstance):
instance.enable(evaluations, domain)
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/disabled.py
import time
from hsr_tamp.pddlstream.algorithms.common import add_facts, add_certified, is_instance_ready, UNKNOWN_EVALUATION
from hsr_tamp.pddlstream.algorithms.algorithm import remove_blocked
from hsr_tamp.pddlstream.language.constants import OptPlan
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.language.conversion import is_plan, transform_action_args, replace_expression
from hsr_tamp.pddlstream.utils import INF, safe_zip, apply_mapping, flatten, elapsed_time
# TODO: disabled isn't quite like complexity. Stream instances below the complexity threshold might be called again
# Well actually, if this was true wouldn't it have already been sampled on a lower level?
def update_bindings(bindings, opt_result, result):
if not isinstance(result, StreamResult):
return bindings
new_bindings = bindings.copy()
for opt, obj in safe_zip(opt_result.output_objects, result.output_objects):
assert new_bindings.get(opt, obj) == obj # TODO: return failure if conflicting bindings
new_bindings[opt] = obj
return new_bindings
def update_cost(cost, opt_result, result):
# TODO: recompute optimistic costs to attempt to produce a tighter bound
if type(result) is not FunctionResult:
return cost
return cost + (result.value - opt_result.value)
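# Worked example (hypothetical numbers): if an optimistic FunctionResult guessed
# a value of 1.0 and the sampled result evaluates to 2.5, update_cost raises the
# plan cost by 2.5 - 1.0 = 1.5; non-function results leave the cost unchanged.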
def bind_action_plan(opt_plan, mapping):
fn = lambda o: mapping.get(o, o)
new_action_plan = [transform_action_args(action, fn)
for action in opt_plan.action_plan]
new_preimage_facts = frozenset(replace_expression(fact, fn)
for fact in opt_plan.preimage_facts)
return OptPlan(new_action_plan, new_preimage_facts)
def get_free_objects(stream_plan):
return set(flatten(result.output_objects for result in stream_plan
if isinstance(result, StreamResult)))
##################################################
def push_disabled(instantiator, disabled):
for instance in list(disabled):
if instance.enumerated:
disabled.remove(instance)
else:
# TODO: only add if not already queued
instantiator.push_instance(instance)
def reenable_disabled(evaluations, domain, disabled):
for instance in disabled:
instance.enable(evaluations, domain)
disabled.clear()
def process_instance(store, domain, instance, disable=False):
if instance.enumerated:
return [], []
start_time = time.time()
new_results, new_facts = instance.next_results(verbose=store.verbose)
store.sample_time += elapsed_time(start_time)
evaluations = store.evaluations
if disable:
instance.disable(evaluations, domain)
for result in new_results:
#add_certified(evaluations, result) # TODO: only add if the fact is actually new?
complexity = INF if (not disable or result.external.is_special) else \
result.compute_complexity(evaluations)
add_facts(evaluations, result.get_certified(), result=result, complexity=complexity)
if disable:
remove_blocked(evaluations, domain, instance, new_results)
add_facts(evaluations, new_facts, result=UNKNOWN_EVALUATION, complexity=0) # TODO: record the instance
return new_results, new_facts
##################################################
def process_stream_plan(store, domain, disabled, stream_plan, action_plan, cost,
bind=True, max_failures=0):
# Bad old implementation of this method
# The only advantage of this vs skeleton is that this can avoid the combinatorial growth in bindings
if not is_plan(stream_plan):
return
if not stream_plan:
store.add_plan(action_plan, cost)
return
stream_plan = [result for result in stream_plan if result.optimistic]
free_objects = get_free_objects(stream_plan)
bindings = {}
bound_plan = []
num_wild = 0
for idx, opt_result in enumerate(stream_plan):
if (store.best_cost <= cost) or (max_failures < (idx - len(bound_plan))):
# TODO: this terminates early when bind=False
break
opt_inputs = [inp for inp in opt_result.instance.input_objects if inp in free_objects]
if (not bind and opt_inputs) or not all(inp in bindings for inp in opt_inputs):
continue
bound_result = opt_result.remap_inputs(bindings)
bound_instance = bound_result.instance
if bound_instance.enumerated or not is_instance_ready(store.evaluations, bound_instance):
continue
# TODO: could remove disabled and just use complexity_limit
new_results, new_facts = process_instance(store, domain, bound_instance) # TODO: bound_result
num_wild += len(new_facts)
if not bound_instance.enumerated:
disabled.add(bound_instance)
for new_result in new_results:
if new_result.is_successful():
                bound_plan.append(new_result)
bindings = update_bindings(bindings, bound_result, bound_plan[-1])
cost = update_cost(cost, opt_result, bound_plan[-1])
break
if (num_wild == 0) and (len(stream_plan) == len(bound_plan)):
store.add_plan(bind_action_plan(action_plan, bindings), cost)
# TODO: report back whether to try w/o optimistic values in the event that wild
##################################################
# def process_stream_plan_branch(store, domain, disabled, stream_plan, action_plan, cost):
# if not is_plan(stream_plan):
# return
# stream_plan = [result for result in stream_plan if result.optimistic]
# if not stream_plan:
# store.add_plan(action_plan, cost)
# return
# free_objects = get_free_objects(stream_plan)
# bindings = defaultdict(set)
# for opt_result in stream_plan:
# opt_inputs = [inp for inp in opt_result.instance.input_objects if inp in free_objects]
# inp_bindings = [bindings[inp] for inp in opt_inputs]
# for combo in product(*inp_bindings):
# bound_result = opt_result.remap_inputs(get_mapping(opt_inputs, combo))
# bound_instance = bound_result.instance
# if bound_instance.enumerated or not is_instance_ready(store.evaluations, bound_instance):
# continue # Disabled
# new_results = process_instance(store, domain, bound_instance)
# if not bound_instance.enumerated:
# disabled.add(bound_instance)
# if isinstance(opt_result, StreamResult):
# for new_result in new_results:
# for out, obj in safe_zip(opt_result.output_objects, new_result.output_objects):
# bindings[out].add(obj)
# #Binding = namedtuple('Binding', ['index', 'mapping'])
# # TODO: after querying, search over all bindings of the produced samples
| 6,986 |
Python
| 47.186207 | 115 | 0.656885 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/meta.py
|
import argparse
import time
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.algorithm import parse_problem
from hsr_tamp.pddlstream.algorithms.common import evaluations_from_init
from hsr_tamp.pddlstream.algorithms.constraints import PlanConstraints
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem, fact_from_fd, fd_from_fact, \
fd_from_evaluations, INTERNAL_AXIOM
from hsr_tamp.pddlstream.algorithms.incremental import solve_incremental
from hsr_tamp.pddlstream.algorithms.focused import solve_focused_original, solve_binding, solve_adaptive, get_negative_externals
from hsr_tamp.pddlstream.algorithms.instantiate_task import instantiate_task, convert_instantiated
from hsr_tamp.pddlstream.algorithms.refinement import optimistic_process_streams
from hsr_tamp.pddlstream.algorithms.scheduling.reinstantiate import reinstantiate_axiom
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import evaluations_from_stream_plan
from hsr_tamp.pddlstream.language.constants import is_plan, Certificate, PDDLProblem, get_prefix, Solution
from hsr_tamp.pddlstream.language.conversion import value_from_obj_expression, EQ
from hsr_tamp.pddlstream.language.external import DEBUG, SHARED_DEBUG
from hsr_tamp.pddlstream.language.stream import PartialInputs
from hsr_tamp.pddlstream.language.temporal import SimplifiedDomain
from hsr_tamp.pddlstream.utils import elapsed_time, INF, Verbose, irange, SEPARATOR
FOCUSED_ALGORITHMS = ['focused', 'binding', 'adaptive']
ALGORITHMS = ['incremental'] + FOCUSED_ALGORITHMS
DEFAULT_ALGORITHM = 'adaptive'
##################################################
def create_parser(default_algorithm=DEFAULT_ALGORITHM):
# https://docs.python.org/3/library/argparse.html#the-add-argument-method
parser = argparse.ArgumentParser() # Automatically includes help
parser.add_argument('-a', '--algorithm', type=str, default=default_algorithm, choices=ALGORITHMS, required=False,
help='Specifies the PDDLStream algorithm to use')
parser.add_argument('-u', '--unit', action='store_true', help='Uses unit costs') # --unit_costs
# args = parser.parse_args()
# print('Arguments:', args)
# TODO: search planner, debug
# TODO: method that calls solve with args
return parser
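def _example_parse_and_solve(problem):
    # Illustrative sketch (not part of the original source): parse command-line
    # arguments and dispatch to solve(). Assumes problem is a PDDLProblem
    # constructed by the caller.
    parser = create_parser()
    args = parser.parse_args()
    return solve(problem, algorithm=args.algorithm, unit_costs=args.unit)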
##################################################
def solve(problem, algorithm=DEFAULT_ALGORITHM, constraints=PlanConstraints(),
stream_info={}, replan_actions=set(),
unit_costs=False, success_cost=INF,
max_time=INF, max_iterations=INF, max_memory=INF,
initial_complexity=0, complexity_step=1, max_complexity=INF,
max_skeletons=INF, search_sample_ratio=1, max_failures=0,
unit_efforts=False, max_effort=INF, effort_weight=None, reorder=True,
#temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[],
#planner=DEFAULT_PLANNER, max_planner_time=DEFAULT_MAX_TIME, max_cost=INF, debug=False
visualize=False, verbose=True, **search_kwargs):
"""
Solves a PDDLStream problem generically using one of the available algorithms
:param problem: a PDDLStream problem
:param algorithm: a PDDLStream algorithm name
:param constraints: PlanConstraints on the set of legal solutions
:param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
:param replan_actions: the actions declared to induce replanning for the purpose of deferred stream evaluation
:param unit_costs: use unit action costs rather than numeric costs
:param success_cost: the exclusive (strict) upper bound on plan cost to successfully terminate
:param max_time: the maximum runtime
:param max_iterations: the maximum number of search iterations
:param max_memory: the maximum amount of memory
:param initial_complexity: the initial stream complexity limit
:param complexity_step: the increase in the stream complexity limit per iteration
:param max_complexity: the maximum stream complexity limit
:param max_skeletons: the maximum number of plan skeletons (max_skeletons=None indicates not adaptive)
:param search_sample_ratio: the desired ratio of sample time / search time when max_skeletons!=None
:param max_failures: the maximum number of stream failures before switching phases when max_skeletons=None
:param unit_efforts: use unit stream efforts rather than estimated numeric efforts
:param max_effort: the maximum amount of stream effort
:param effort_weight: a multiplier for stream effort compared to action costs
:param reorder: if True, reorder stream plans to minimize the expected sampling overhead
:param visualize: if True, draw the constraint network and stream plan as a graphviz file
:param verbose: if True, print the result of each stream application
:param search_kwargs: keyword args for the search subroutine
:return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
(or None), cost is the cost of the plan (INF if no plan), and evaluations is init expanded
using stream applications
"""
# TODO: print the arguments using locals()
# TODO: could instead make common arguments kwargs but then they could have different default values
# TODO: portfolios of PDDLStream algorithms
if algorithm == 'incremental':
return solve_incremental(
problem=problem, constraints=constraints,
unit_costs=unit_costs, success_cost=success_cost,
max_iterations=max_iterations, max_time=max_time, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
verbose=verbose, **search_kwargs)
    # if algorithm == 'abstract_focused': # meta_focused
# return solve_focused(
# problem, constraints=constraints,
# stream_info=stream_info, replan_actions=replan_actions,
# unit_costs=unit_costs, success_cost=success_cost,
# max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
# initial_complexity=initial_complexity, complexity_step=complexity_step, #max_complexity=max_complexity,
# max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
# bind=bind, max_failures=max_failures,
# unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
# visualize=visualize, verbose=verbose, **search_kwargs)
fail_fast = (max_failures < INF)
if algorithm == 'focused':
return solve_focused_original(
problem, constraints=constraints,
stream_info=stream_info, replan_actions=replan_actions,
unit_costs=unit_costs, success_cost=success_cost,
max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
# max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
fail_fast=fail_fast, # bind=bind, max_failures=max_failures,
unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
visualize=visualize, verbose=verbose, **search_kwargs)
if algorithm == 'binding':
return solve_binding(
problem, constraints=constraints,
stream_info=stream_info, replan_actions=replan_actions,
unit_costs=unit_costs, success_cost=success_cost,
max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
# max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
fail_fast=fail_fast, # bind=bind, max_failures=max_failures,
unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
visualize=visualize, verbose=verbose, **search_kwargs)
if algorithm == 'adaptive':
return solve_adaptive(
problem, constraints=constraints,
stream_info=stream_info, replan_actions=replan_actions,
unit_costs=unit_costs, success_cost=success_cost,
max_time=max_time, max_iterations=max_iterations, max_memory=max_memory,
initial_complexity=initial_complexity, complexity_step=complexity_step, max_complexity=max_complexity,
max_skeletons=max_skeletons, search_sample_ratio=search_sample_ratio,
# bind=bind, max_failures=max_failures,
unit_efforts=unit_efforts, max_effort=max_effort, effort_weight=effort_weight, reorder=reorder,
visualize=visualize, verbose=verbose, **search_kwargs)
raise NotImplementedError(algorithm)
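def _example_solve_with_budget(problem):
    # Illustrative sketch (not part of the original source): run the default
    # adaptive algorithm under a one-minute budget and inspect the Solution.
    plan, cost, certificate = solve(problem, algorithm='adaptive', max_time=60)
    if is_plan(plan):
        print('Plan of cost {} with {} action(s)'.format(cost, len(plan)))
    return plan, cost, certificate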
##################################################
def solve_restart(problem, max_time=INF, max_restarts=0, iteration_time=INF, abort=True, **kwargs):
# TODO: iteratively lower the cost bound
# TODO: a sequence of different planner configurations
# TODO: reset objects and/or streams
    if (max_restarts >= 1) and (iteration_time == INF):
        iteration_time = 2 * 60  # Default per-restart budget when none is specified
assert (max_restarts == 0) or (iteration_time != INF)
assert max_restarts >= 0
start_time = time.time()
for attempt in irange(1+max_restarts):
iteration_start_time = time.time()
if elapsed_time(start_time) > max_time:
break
if attempt >= 1:
print(SEPARATOR)
# solution = planner_fn(problem) # Or include the problem in the lambda
remaining_time = min(iteration_time, max_time-elapsed_time(start_time))
solution = solve(problem, max_time=remaining_time, **kwargs)
plan, cost, certificate = solution
if is_plan(plan): # TODO: INFEASIBLE
return solution
if abort and (elapsed_time(iteration_start_time) < remaining_time):
break # TODO: return the cause of failure
certificate = Certificate(all_facts=[], preimage_facts=[]) # TODO: aggregate
return Solution(None, INF, certificate)
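def _example_solve_with_restarts(problem):
    # Illustrative sketch (not part of the original source): allow up to two
    # restarts of at most one minute each within a five-minute total budget.
    return solve_restart(problem, max_time=5*60, max_restarts=2, iteration_time=60)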
##################################################
def set_unique(externals):
for external in externals:
external.info.opt_gen_fn = PartialInputs(unique=True)
external.num_opt_fns = 0
def examine_instantiated(problem, unique=False, normalize=True, unit_costs=False, verbose=False, debug=False, **kwargs):
# TODO: refactor to an analysis file
domain_pddl, constant_map, stream_pddl, _, init, goal = problem
stream_map = DEBUG if unique else SHARED_DEBUG # DEBUG_MODES
problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
evaluations, goal_exp, domain, externals = parse_problem(problem, **kwargs)
assert not isinstance(domain, SimplifiedDomain)
negative = get_negative_externals(externals)
externals = list(filter(lambda s: s not in negative, externals))
# store = SolutionStore(evaluations, max_time, success_cost=INF, verbose=verbose)
# instantiator = Instantiator(externals, evaluations)
# process_stream_queue(instantiator, store, complexity_limit=INF, verbose=verbose)
# results = [] # TODO: extract from process_stream_queue
# set_unique(externals)
# domain.actions[:] = [] # TODO: only instantiate axioms
# TODO: drop all fluents and instantiate
# TODO: relaxed planning version of this
results, exhausted = optimistic_process_streams(evaluations, externals, complexity_limit=INF, max_effort=None)
evaluations = evaluations_from_stream_plan(evaluations, results, max_effort=None)
problem = get_problem(evaluations, goal_exp, domain, unit_costs)
task = task_from_domain_problem(domain, problem)
with Verbose(debug):
instantiated = instantiate_task(task, check_infeasible=False)
if instantiated is None:
return results, None
# TODO: reinstantiate actions?
instantiated.axioms[:] = [reinstantiate_axiom(axiom) for axiom in instantiated.axioms]
if normalize:
instantiated = convert_instantiated(instantiated)
return results, instantiated
# sas_task = sas_from_pddl(task, debug=debug)
##################################################
def iterate_subgoals(goals, axiom_from_effect):
necessary = set()
possible = set()
for goal in goals:
if goal in axiom_from_effect:
necessary.update(set.intersection(*[set(axiom.condition) for axiom in axiom_from_effect[goal]]))
# print(len(axiom_from_effect[goal]) == 1) # Universal
for axiom in axiom_from_effect[goal]:
possible.update(axiom.condition) # Add goal as well?
else:
necessary.add(goal)
print('Necessary:', necessary)
print('Possible:', possible - necessary)
return possible
def recurse_subgoals(goals, condition_from_effect):
possible = set()
def recurse(goal):
if goal in possible:
return
possible.add(goal)
for condition in condition_from_effect[goal]:
recurse(condition)
for goal in goals:
recurse(goal)
return possible
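def _example_recurse_subgoals():
    # Illustrative sketch (not part of the original source): with effect 'a'
    # conditioned on 'b' and 'b' conditioned on 'c', the reachable subgoals of
    # ['a'] are {'a', 'b', 'c'}.
    condition_from_effect = defaultdict(set, {'a': {'b'}, 'b': {'c'}})
    return recurse_subgoals(['a'], condition_from_effect)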
def analyze_goal(problem, use_actions=False, use_axioms=True, use_streams=True, blocked_predicates=[], **kwargs):
# TODO: instantiate all goal partial states
# TODO: remove actions/axioms that never could achieve a subgoal
domain_pddl, constant_map, stream_pddl, stream_map, init, goal = problem
evaluations = evaluations_from_init(init)
init = set(fd_from_evaluations(evaluations))
# from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import recover_axioms_plans
results, instantiated = examine_instantiated(problem, **kwargs) # TODO: only do if the goals are derived
if instantiated is None:
return None
# optimistic_init = set(instantiated.task.init)
# This is like backchaining in a relaxed space
condition_from_effect = defaultdict(set)
if use_actions:
# TODO: selectively ignore some conditions (e.g. HandEmpty)
# TODO: refactor into separate method
for action in instantiated.actions:
for conditional, effect in action.add_effects:
for condition in (action.precondition + conditional):
if condition.predicate not in blocked_predicates:
condition_from_effect[effect].add(condition)
for conditional, effect in action.del_effects:
for condition in (action.precondition + conditional):
if condition.predicate not in blocked_predicates:
condition_from_effect[effect.negate()].add(condition)
if use_axioms:
# TODO: axiom_rules.handle_axioms(...)
# print('Axioms:', instantiated.axioms)
for axiom in instantiated.axioms:
# axiom = reinstantiate_axiom(axiom)
# axiom.dump()
for condition in axiom.condition:
condition_from_effect[axiom.effect].add(condition)
if use_streams:
for result in results:
for effect in result.certified:
if get_prefix(effect) == EQ:
continue
for condition in result.domain:
condition_from_effect[fd_from_fact(effect)].add(fd_from_fact(condition))
print('Goals:', list(map(fact_from_fd, instantiated.goal_list)))
# all_subgoals = iterate_subgoals(instantiated.goal_list, axiom_from_effect)
all_subgoals = recurse_subgoals(instantiated.goal_list, condition_from_effect)
filtered_subgoals = [subgoal for subgoal in all_subgoals if subgoal in init] # TODO: return the goals as well?
external_subgoals = [value_from_obj_expression(fact_from_fd(subgoal))
for subgoal in sorted(filtered_subgoals, key=lambda g: g.predicate)
if not subgoal.predicate.startswith(INTERNAL_AXIOM)]
print('Initial:', external_subgoals)
return external_subgoals # TODO: decompose into simplified components
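def _example_analyze_goal(problem):
    # Illustrative sketch (not part of the original source): recover the
    # initially satisfied subgoals reachable by backchaining through axioms and
    # streams only, ignoring action preconditions.
    return analyze_goal(problem, use_actions=False, use_axioms=True, use_streams=True)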
| 16,305 |
Python
| 51.6 | 128 | 0.689114 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/visualization.py
|
from __future__ import print_function
import os
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders
from hsr_tamp.pddlstream.language.constants import EQ, get_prefix, get_args, str_from_plan, is_parameter, \
partition_facts
from hsr_tamp.pddlstream.language.conversion import str_from_fact, evaluation_from_fact
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.object import OptimisticObject
from hsr_tamp.pddlstream.utils import clear_dir, ensure_dir, str_from_object, user_input, flatten
# https://www.graphviz.org/doc/info/
DEFAULT_EXTENSION = '.png' # png | pdf
PARAMETER_COLOR = 'LightGreen'
CONSTRAINT_COLOR = 'LightBlue'
NEGATED_COLOR = 'LightYellow'
COST_COLOR = 'LightSalmon'
STREAM_COLOR = 'LightSteelBlue'
FUNCTION_COLOR = 'LightCoral'
VISUALIZATIONS_DIR = 'visualizations/'
CONSTRAINT_NETWORK_DIR = os.path.join(VISUALIZATIONS_DIR, 'constraint_networks/')
STREAM_PLAN_DIR = os.path.join(VISUALIZATIONS_DIR, 'stream_plans/')
PLAN_LOG_FILE = os.path.join(VISUALIZATIONS_DIR, 'log.txt')
ITERATION_TEMPLATE = 'iteration_{}' + DEFAULT_EXTENSION
SYNTHESIZER_TEMPLATE = '{}_{}' + DEFAULT_EXTENSION
##################################################
def has_pygraphviz():
# TODO: networkx
# https://github.com/caelan/pddlstream/blob/82ee5e363585d0af8ff9532ecc14641687d5b56b/examples/fault_tolerant/data_network/run.py#L189
#import networkx
#import graphviz
#import pydot
try:
import pygraphviz
except ImportError:
return False
return True
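def _example_visualize_if_available(evaluations, stream_plan, iteration):
    # Illustrative sketch (not part of the original source): guard graphviz
    # output on the optional pygraphviz dependency.
    if has_pygraphviz():
        create_visualizations(evaluations, stream_plan, iteration)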
def reset_visualizations():
clear_dir(VISUALIZATIONS_DIR)
ensure_dir(CONSTRAINT_NETWORK_DIR)
ensure_dir(STREAM_PLAN_DIR)
def log_plans(stream_plan, action_plan, iteration):
# TODO: do this within the focused algorithm itself?
from hsr_tamp.pddlstream.retired.synthesizer import decompose_stream_plan
decomposed_plan = decompose_stream_plan(stream_plan)
with open(PLAN_LOG_FILE, 'a+') as f:
f.write('Iteration: {}\n'
'Component plan: {}\n'
'Stream plan: {}\n'
'Action plan: {}\n\n'.format(
iteration, decomposed_plan,
stream_plan, str_from_plan(action_plan)))
def create_synthesizer_visualizations(result, iteration):
from hsr_tamp.pddlstream.retired.synthesizer import decompose_result
stream_plan = decompose_result(result)
if len(stream_plan) <= 1:
return
# TODO: may overwrite another optimizer if both used on the same iteration
filename = SYNTHESIZER_TEMPLATE.format(result.external.name, iteration)
visualize_constraints(result.get_objectives(), os.path.join(CONSTRAINT_NETWORK_DIR, filename))
visualize_stream_plan_bipartite(stream_plan, os.path.join(STREAM_PLAN_DIR, filename))
def create_visualizations(evaluations, stream_plan, iteration):
# TODO: place it in the temp_dir?
# TODO: decompose any joint streams
for result in stream_plan:
create_synthesizer_visualizations(result, iteration)
filename = ITERATION_TEMPLATE.format(iteration)
# visualize_stream_plan(stream_plan, path)
constraints = set() # TODO: approximates needed facts using produced ones
for stream in stream_plan:
constraints.update(filter(lambda f: evaluation_from_fact(f) not in evaluations, stream.get_certified()))
print('Constraints:', str_from_object(constraints))
visualize_constraints(constraints, os.path.join(CONSTRAINT_NETWORK_DIR, filename))
from hsr_tamp.pddlstream.retired.synthesizer import decompose_stream_plan
decomposed_plan = decompose_stream_plan(stream_plan)
if len(decomposed_plan) != len(stream_plan):
visualize_stream_plan(decompose_stream_plan(stream_plan), os.path.join(STREAM_PLAN_DIR, filename))
#visualize_stream_plan_bipartite(stream_plan, os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
visualize_stream_plan(stream_plan, os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
##################################################
def visualize_constraints(constraints, filename='constraint_network'+DEFAULT_EXTENSION, use_functions=True):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=False)
graph.node_attr['style'] = 'filled'
#graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['colorscheme'] = 'SVG'
graph.edge_attr['colorscheme'] = 'SVG'
#graph.graph_attr['rotate'] = 90
#graph.node_attr['fixedsize'] = True
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
graph.graph_attr['rankdir'] = 'RL'
graph.graph_attr['nodesep'] = 0.05
graph.graph_attr['ranksep'] = 0.25
#graph.graph_attr['pad'] = 0
# splines="false";
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
positive, negated, functions = partition_facts(constraints)
for head in (positive + negated + functions):
# TODO: prune values w/o free parameters?
name = str_from_fact(head)
if head in functions:
if not use_functions:
continue
color = COST_COLOR
elif head in negated:
color = NEGATED_COLOR
else:
color = CONSTRAINT_COLOR
graph.add_node(name, shape='box', color=color)
for arg in get_args(head):
if isinstance(arg, OptimisticObject) or is_parameter(arg):
arg_name = str(arg)
graph.add_node(arg_name, shape='circle', color=PARAMETER_COLOR)
graph.add_edge(name, arg_name)
graph.draw(filename, prog='dot') # neato | dot | twopi | circo | fdp | nop
print('Saved', filename)
return graph
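def _example_constraint_network():
    # Illustrative sketch (not part of the original source): facts render as
    # boxes and free parameters ('?q') as circles; '@pose' stands in for a
    # hypothetical constant, which is not drawn. Requires pygraphviz.
    constraints = {('Conf', '?q'), ('Kin', '?q', '@pose')}
    return visualize_constraints(constraints, filename='example' + DEFAULT_EXTENSION)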
##################################################
def display_image(filename):
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread(filename)
plt.imshow(img)
plt.title(filename)
plt.axis('off')
plt.tight_layout()
#plt.show()
plt.draw()
    #plt.waitforbuttonpress(0) # this would block indefinitely
    plt.pause(interval=1e-3)
    user_input()
    plt.close()  # Closes the current figure; plt.close(plt.figure()) would open and close a new one
def visualize_stream_orders(orders, streams=[], filename='stream_orders'+DEFAULT_EXTENSION):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=True)
graph.node_attr['style'] = 'filled'
graph.node_attr['shape'] = 'box'
graph.node_attr['color'] = STREAM_COLOR
graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
streams = set(streams) | set(flatten(orders))
for stream in streams:
graph.add_node(str(stream))
for stream1, stream2 in orders:
graph.add_edge(str(stream1), str(stream2))
# TODO: could also print the raw values (or a lookup table)
# https://stackoverflow.com/questions/3499056/making-a-legend-key-in-graphviz
graph.draw(filename, prog='dot')
print('Saved', filename)
#display_image(filename)
return graph
def visualize_stream_plan(stream_plan, filename='stream_plan'+DEFAULT_EXTENSION):
return visualize_stream_orders(get_partial_orders(stream_plan), streams=stream_plan, filename=filename)
##################################################
def visualize_stream_plan_bipartite(stream_plan, filename='stream_plan'+DEFAULT_EXTENSION, use_functions=False):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=True)
graph.node_attr['style'] = 'filled'
graph.node_attr['shape'] = 'box'
graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
#graph.graph_attr['rankdir'] = 'LR'
graph.graph_attr['nodesep'] = 0.1
graph.graph_attr['ranksep'] = 0.25
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
# TODO: store these settings as a dictionary
def add_fact(fact):
head, color = (fact[1], COST_COLOR) if get_prefix(fact) == EQ else (fact, CONSTRAINT_COLOR)
s_fact = str_from_fact(head)
graph.add_node(s_fact, color=color)
return s_fact
def add_stream(stream):
color = FUNCTION_COLOR if isinstance(stream, FunctionResult) else STREAM_COLOR
s_stream = str(stream.instance) if isinstance(stream, FunctionResult) else str(stream)
graph.add_node(s_stream, style='rounded,filled', color=color)
# shape: oval, plaintext, polygon, rarrow, cds
# style: rounded, filled, bold
return s_stream
achieved_facts = set()
for stream in stream_plan:
if not use_functions and isinstance(stream, FunctionResult):
continue
s_stream = add_stream(stream)
for fact in stream.instance.get_domain():
if fact in achieved_facts:
s_fact = add_fact(fact)
graph.add_edge(s_fact, s_stream) # Add initial facts?
#if not isinstance(stream, StreamResult):
# continue
for fact in stream.get_certified():
if fact not in achieved_facts: # Ensures DAG
s_fact = add_fact(fact)
graph.add_edge(s_stream, s_fact)
achieved_facts.add(fact)
graph.draw(filename, prog='dot')
print('Saved', filename)
return graph
# graph.layout
# https://pygraphviz.github.io/documentation/pygraphviz-1.3rc1/reference/agraph.html
# https://pygraphviz.github.io/documentation/stable/reference/agraph.html#pygraphviz.AGraph.draw
| 9,884 |
Python
| 39.346939 | 137 | 0.662687 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/constraints.py
|
from __future__ import print_function
from collections import namedtuple
from copy import deepcopy
from hsr_tamp.pddlstream.algorithms.common import add_fact, INTERNAL_EVALUATION
from hsr_tamp.pddlstream.algorithms.downward import make_predicate, make_preconditions, make_effects, add_predicate, \
fd_from_fact
from hsr_tamp.pddlstream.language.constants import Or, And, is_parameter, Not, str_from_plan, EQ
from hsr_tamp.pddlstream.language.object import Object, OptimisticObject
from hsr_tamp.pddlstream.utils import find_unique, safe_zip, str_from_object, INF, is_hashable, neighbors_from_orders, \
get_ancestors, get_descendants
OrderedSkeleton = namedtuple('OrderedSkeleton', ['actions', 'orders']) # TODO: AND/OR tree
INTERNAL_PREFIX = '_' # TODO: possibly apply elsewhere
WILD = '*'
ASSIGNED_PREDICATE = '{}assigned'
BOUND_PREDICATE = '{}bound' # TODO: switch with assigned
GROUP_PREDICATE = '{}group'
ORDER_PREDICATE = '{}order'
GOAL_INDEX = -1
def linear_order(actions):
if not actions:
return set()
return {(i, i+1) for i in range(len(actions)-1)} \
| {(len(actions)-1, GOAL_INDEX)}
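def _example_linear_order():
    # Illustrative sketch (not part of the original source): three actions
    # induce the chain {(0, 1), (1, 2), (2, GOAL_INDEX)}.
    actions = [('pick', ['?b']), ('move', []), ('place', ['?b'])]
    return linear_order(actions)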
class PlanConstraints(object):
def __init__(self, skeletons=None, groups={}, exact=True, hint=False, max_cost=INF):
# TODO: constraint that the skeleton is the tail of the plan
if skeletons is not None:
skeletons = [skeleton if isinstance(skeleton, OrderedSkeleton)
else OrderedSkeleton(skeleton, linear_order(skeleton)) for skeleton in skeletons]
self.skeletons = skeletons
self.groups = groups # Could make this a list of lists
self.exact = exact
self.max_cost = max_cost
#self.max_length = max_length
#self.hint = hint # TODO: search over skeletons first and then fall back
#if self.hint:
# raise NotImplementedError()
def dump(self):
print('{}(exact={}, max_cost={})'.format(self.__class__.__name__, self.exact, self.max_cost))
if self.skeletons is None:
return
for i, skeleton in enumerate(self.skeletons):
print(i, str_from_plan(skeleton))
def __repr__(self):
return '{}{}'.format(self.__class__.__name__, str_from_object(self.__dict__))
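def _example_skeleton_constraints():
    # Illustrative sketch (not part of the original source): restrict the
    # search to plans matching a two-action skeleton where '?b' is shared
    # between the actions and WILD ('*') matches any argument.
    skeleton = [('pick', ['?b', WILD]), ('place', ['?b', WILD])]
    return PlanConstraints(skeletons=[skeleton], exact=True, max_cost=INF)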
# TODO: rename other costs to be terminate_cost (or decision cost)
def to_constant(parameter):
name = parameter[1:]
return to_obj('@{}'.format(name))
def to_obj(value):
    # Allows raw values as well as existing objects to be specified
if any(isinstance(value, Class) for Class in [Object, OptimisticObject]):
return value
return Object.from_value(value)
def get_internal_prefix(internal):
return INTERNAL_PREFIX if internal else ''
def is_constant(arg):
return not is_parameter(arg) and (arg != WILD)
##################################################
def add_plan_constraints(constraints, domain, evaluations, goal_exp, internal=False):
if (constraints is None) or (constraints.skeletons is None):
return goal_exp
import pddl
# TODO: unify this with the constraint ordering
# TODO: can constrain to use a plan prefix
prefix = get_internal_prefix(internal)
assigned_predicate = ASSIGNED_PREDICATE.format(prefix)
bound_predicate = BOUND_PREDICATE.format(prefix)
group_predicate = GROUP_PREDICATE.format(prefix)
order_predicate = ORDER_PREDICATE.format(prefix)
new_facts = []
for group in constraints.groups:
for value in constraints.groups[group]:
# TODO: could make all constants groups (like an equality group)
fact = (group_predicate, to_obj(group), to_obj(value))
new_facts.append(fact)
new_actions = []
new_goals = []
for num, skeleton in enumerate(constraints.skeletons):
actions, orders = skeleton
incoming_orders, _ = neighbors_from_orders(orders)
order_facts = [(order_predicate, to_obj('n{}'.format(num)), to_obj('t{}'.format(step)))
for step in range(len(actions))]
for step, (name, args) in enumerate(actions):
# TODO: could also just remove the free parameter from the action
new_action = deepcopy(find_unique(lambda a: a.name == name, domain.actions))
local_from_global = {a: p.name for a, p in safe_zip(args, new_action.parameters) if is_parameter(a)}
ancestors, descendants = get_ancestors(step, orders), get_descendants(step, orders)
parallel = set(range(len(actions))) - ancestors - descendants - {step}
parameters = set(filter(is_parameter, args))
ancestor_parameters = parameters & set(filter(is_parameter, (p for idx in ancestors for p in actions[idx][1])))
#descendant_parameters = parameters & set(filter(is_parameter, (p for idx in descendants for p in actions[idx][1])))
parallel_parameters = parameters & set(filter(is_parameter, (p for idx in parallel for p in actions[idx][1])))
#bound_preconditions = [Imply(bound, assigned) for bound, assigned in safe_zip(bound_facts, assigned_facts)]
bound_condition = pddl.Conjunction([pddl.Disjunction(map(fd_from_fact, [
Not((bound_predicate, to_constant(p))), (assigned_predicate, to_constant(p), local_from_global[p])
])) for p in parallel_parameters])
existing_preconditions = [(assigned_predicate, to_constant(p), local_from_global[p])
for p in ancestor_parameters]
constant_pairs = [(a, p.name) for a, p in safe_zip(args, new_action.parameters) if is_constant(a)]
group_preconditions = [(group_predicate if is_hashable(a) and (a in constraints.groups) else EQ, to_obj(a), p)
for a, p in constant_pairs]
order_preconditions = [order_facts[idx] for idx in incoming_orders[step]]
new_preconditions = existing_preconditions + group_preconditions + order_preconditions + [Not(order_facts[step])]
new_action.precondition = pddl.Conjunction(
[new_action.precondition, bound_condition,
make_preconditions(new_preconditions)]).simplified()
            new_parameters = parameters - ancestor_parameters  # Subtract parameter names, not step indices
bound_facts = [(bound_predicate, to_constant(p)) for p in new_parameters]
assigned_facts = [(assigned_predicate, to_constant(p), local_from_global[p]) for p in new_parameters]
new_effects = bound_facts + assigned_facts + [order_facts[step]]
new_action.effects.extend(make_effects(new_effects))
# TODO: should also negate the effects of all other sequences here
new_actions.append(new_action)
#new_action.dump()
new_goals.append(And(*[order_facts[idx] for idx in incoming_orders[GOAL_INDEX]]))
add_predicate(domain, make_predicate(order_predicate, ['?num', '?step']))
if constraints.exact:
domain.actions[:] = []
domain.actions.extend(new_actions)
new_goal_exp = And(goal_exp, Or(*new_goals))
for fact in new_facts:
add_fact(evaluations, fact, result=INTERNAL_EVALUATION)
return new_goal_exp
| 7,191 |
Python
| 47.92517 | 128 | 0.649979 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/reorder.py
|
import time
from collections import namedtuple, deque, Counter
from itertools import combinations
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.language.external import Result
from hsr_tamp.pddlstream.language.statistics import Stats, Performance, EPSILON
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.utils import INF, neighbors_from_orders, topological_sort, get_connected_components, \
sample_topological_sort, is_acyclic, layer_sort, Score, safe_zip
def get_output_objects(result):
if isinstance(result, StreamResult):
return result.output_objects
return tuple()
def get_object_orders(stream_plan):
# TODO: check that only one result per output object
partial_orders = set()
for i, stream1 in enumerate(stream_plan):
for stream2 in stream_plan[i+1:]:
if set(get_output_objects(stream1)) & stream2.instance.get_all_input_objects():
partial_orders.add((stream1, stream2))
return partial_orders
def get_initial_orders(init_facts, stream_plan):
return {(fact, stream) for stream in stream_plan for fact in stream.get_domain() if fact in init_facts}
def get_fact_orders(stream_plan, init_facts=set()):
# TODO: explicitly recover this from plan_streams
# TODO: init_facts isn't used in practice
achieved_facts = set(init_facts)
partial_orders = set()
for i, stream1 in enumerate(stream_plan):
new_facts = set(stream1.get_certified()) - achieved_facts
for stream2 in stream_plan[i+1:]: # Prevents circular
if new_facts & set(stream2.get_domain()):
partial_orders.add((stream1, stream2))
achieved_facts.update(new_facts)
return partial_orders
def get_partial_orders(stream_plan, use_facts=True, **kwargs):
partial_orders = get_object_orders(stream_plan)
if use_facts:
partial_orders.update(get_fact_orders(stream_plan, **kwargs))
assert is_acyclic(stream_plan, partial_orders)
return partial_orders
##################################################
def get_stream_plan_components(external_plan, **kwargs):
partial_orders = get_partial_orders(external_plan, **kwargs)
return get_connected_components(external_plan, partial_orders)
def dump_components(stream_plan):
for i, result in enumerate(stream_plan):
components = get_stream_plan_components(stream_plan[:i+1])
print(i, len(components), components)
##################################################
def get_future_p_successes(stream_plan):
# TODO: should I use this instead of p_success in some places?
    # TODO: learn this instead by estimating the conditional probabilities of stream sequences
# TODO: propagate stats_heuristic
orders = get_partial_orders(stream_plan)
incoming_edges, outgoing_edges = neighbors_from_orders(orders)
descendants_map = {}
for s1 in reversed(stream_plan):
descendants_map[s1] = s1.instance.get_p_success()
for s2 in outgoing_edges[s1]:
descendants_map[s1] *= descendants_map[s2]
return descendants_map
def compute_expected_cost(stream_plan, stats_fn=Performance.get_statistics):
if not is_plan(stream_plan):
return INF
expected_cost = 0.
for result in reversed(stream_plan):
p_success, overhead = stats_fn(result)
expected_cost = overhead + p_success * expected_cost
return expected_cost
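def _example_expected_cost():
    # Illustrative sketch (not part of the original source) of the backward
    # recursion above, with (p_success, overhead) pairs per stream:
    # the last stream gives 2.0 + 1.0*0 = 2.0, then 1.0 + 0.5*2.0 = 2.0.
    stats = [(0.5, 1.0), (1.0, 2.0)]
    expected_cost = 0.
    for p_success, overhead in reversed(stats):
        expected_cost = overhead + p_success * expected_cost
    return expected_cost  # = 2.0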
##################################################
Subproblem = namedtuple('Subproblem', ['cost', 'head', 'subset'])
def compute_pruning_orders(results, stats_fn=Performance.get_statistics, tiebreaker_fn=lambda v: None):
# TODO: reason about pairs that don't have a (transitive) ordering
# TODO: partial orders make this heuristic not optimal
# TODO: use result.external.name to cluster?
dominates = lambda v1, v2: all(s1 <= s2 for s1, s2 in safe_zip(stats_fn(v1), stats_fn(v2))) \
and tiebreaker_fn(v1) <= tiebreaker_fn(v2)
effort_orders = set()
for v1, v2 in combinations(results, r=2): # randomize
if dominates(v1, v2):
effort_orders.add((v1, v2)) # Includes equality
elif dominates(v2, v1):
effort_orders.add((v2, v1))
return effort_orders
def dynamic_programming(store, vertices, valid_head_fn, stats_fn=Performance.get_statistics, prune=True, greedy=False, **kwargs):
# TODO: include context here as a weak constraint
# TODO: works in the absence of partial orders
# TODO: can also more manually reorder
# 2^N rather than N!
start_time = time.time()
effort_orders = set() # 1 cheaper than 2
if prune:
effort_orders.update(compute_pruning_orders(vertices, stats_fn=stats_fn, **kwargs))
_, out_priority_orders = neighbors_from_orders(effort_orders) # more expensive
priority_ordering = topological_sort(vertices, effort_orders)[::-1] # most expensive to cheapest
# TODO: can break ties with index on action plan to prioritize doing the temporally first things
# TODO: could the greedy strategy lead to premature choices
# TODO: this starts to blow up - group together similar streams (e.g. collision streams) to decrease size
# TODO: key grouping concern are partial orders and ensuring feasibility (isomorphism)
# TODO: flood-fill cheapest as soon as something that has no future dependencies has been found
# TODO: do the forward version to take advantage of sink vertices
subset = frozenset()
queue = deque([subset]) # Acyclic because subsets
subproblems = {subset: Subproblem(cost=0, head=None, subset=None)}
while queue: # searches backward from last to first
if store.is_terminated():
return vertices
subset = queue.popleft() # TODO: greedy/weighted A* version of this (heuristic is next cheapest stream)
applied = set()
# TODO: roll-out more than one step to cut the horizon
# TODO: compute a heuristic that's the best case affordances from subsequent streams
for v in priority_ordering: # most expensive first
if greedy and applied:
break
if (v not in subset) and valid_head_fn(v, subset) and not (out_priority_orders[v] & applied):
applied.add(v)
new_subset = frozenset([v]) | subset
p_success, overhead = stats_fn(v)
new_cost = overhead + p_success*subproblems[subset].cost
subproblem = Subproblem(cost=new_cost, head=v, subset=subset) # Adds new element to the front
if new_subset not in subproblems:
queue.append(new_subset)
subproblems[new_subset] = subproblem
elif new_cost < subproblems[new_subset].cost:
subproblems[new_subset] = subproblem
ordering = []
subset = frozenset(vertices)
while True:
if subset not in subproblems:
print(vertices)
# TODO: some sort of bug where the problem isn't solved?
subproblem = subproblems[subset]
if subproblem.head is None:
break
ordering.append(subproblem.head)
subset = subproblem.subset
#print('Streams: {} | Expected cost: {:.3f} | Time: {:.3f}'.format(
# len(ordering), compute_expected_cost(ordering, stats_fn=stats_fn), elapsed_time(start_time)))
return ordering
##################################################
def dummy_reorder_stream_plan(stream_plan, **kwargs):
return stream_plan
def random_reorder_stream_plan(stream_plan, **kwargs):
if not stream_plan:
return stream_plan
return sample_topological_sort(stream_plan, get_partial_orders(stream_plan))
def greedy_reorder_stream_plan(stream_plan, **kwargs):
if not stream_plan:
return stream_plan
return topological_sort(stream_plan, get_partial_orders(stream_plan),
priority_fn=lambda s: s.get_statistics().overhead)
##################################################
def dump_layers(distances):
streams_from_layer = {}
for stream, layer in distances.items():
streams_from_layer.setdefault(layer, []).append(stream)
for layer, streams in streams_from_layer.items():
print(layer, sorted(streams, key=Result.stats_heuristic, reverse=True))
return streams_from_layer
def compute_distances(stream_plan):
stream_orders = get_partial_orders(stream_plan)
reversed_orders = {(s2, s1) for s1, s2 in stream_orders}
in_stream_orders, out_stream_orders = neighbors_from_orders(reversed_orders)
sources = {stream for stream in stream_plan if not in_stream_orders[stream]} # In the reversed DAG
output_sources = {stream for stream in sources if stream.external.has_outputs}
test_sources = sources - output_sources
#visited = dijkstra(output_sources, reversed_orders)
#distances = {stream: node.g for stream, node in visited.items()}
distances = layer_sort(set(stream_plan) - test_sources, reversed_orders)
# TODO: take into account argument overlap
max_distance = max([0] + list(distances.values()))
for stream in stream_plan:
if stream not in distances:
distances[stream] = min([max_distance] + [distances[s] - 1 for s in out_stream_orders[stream]])
#dump_layers(distances)
return distances
def layer_reorder_stream_plan(stream_plan, **kwargs):
if not stream_plan:
return stream_plan
stream_orders = get_partial_orders(stream_plan)
reversed_orders = {(s2, s1) for s1, s2 in stream_orders}
distances = compute_distances(stream_plan)
priority_fn = lambda s: Score(not s.external.has_outputs, distances[s], -s.stats_heuristic())
reverse_order = topological_sort(stream_plan, reversed_orders, priority_fn=priority_fn)
return reverse_order[::-1]
def compute_statistics(stream_plan, bias=True):
stats_from_stream = {result: result.external.get_statistics() for result in stream_plan}
if not bias:
return stats_from_stream
distances = compute_distances(stream_plan)
max_distance = max(distances.values())
for result in stream_plan:
p_success, overhead = stats_from_stream[result]
if result.external.has_outputs:
# TODO: is_function, number of free inputs, etc.
# TODO: decrease p_success if fewer free inputs (or input streams)
# TODO: dynamic_programming seems to automatically order streams with fewer free ahead anyways
overhead += EPSILON*(max_distance - distances[result] + 1)
else:
p_success *= EPSILON
stats_from_stream[result] = Stats(p_success, overhead)
return stats_from_stream
##################################################
def optimal_reorder_stream_plan(store, stream_plan, stats_from_stream=None, **kwargs):
if not stream_plan:
return stream_plan
if stats_from_stream is None:
stats_from_stream = compute_statistics(stream_plan)
# TODO: use the negative output (or overhead) as a bound
indices = range(len(stream_plan))
index_from_stream = dict(zip(stream_plan, indices))
stream_orders = get_partial_orders(stream_plan)
stream_orders = {(index_from_stream[s1], index_from_stream[s2]) for s1, s2 in stream_orders}
#nodes = stream_plan
nodes = indices # TODO: are indices actually much faster?
in_stream_orders, out_stream_orders = neighbors_from_orders(stream_orders)
valid_combine = lambda v, subset: out_stream_orders[v] <= subset
#valid_combine = lambda v, subset: in_stream_orders[v] & subset
# TODO: these are special because they don't enable any downstream access to another stream
#sources = {stream_plan[index] for index in indices if not in_stream_orders[index]}
#sinks = {stream_plan[index] for index in indices if not out_stream_orders[index]} # Contains collision checks
#print(dijkstra(sources, get_partial_orders(stream_plan)))
stats_fn = lambda idx: stats_from_stream[stream_plan[idx]]
#tiebreaker_fn = lambda *args: 0
#tiebreaker_fn = lambda *args: random.random() # TODO: introduces cycles
tiebreaker_fn = lambda idx: stream_plan[idx].stats_heuristic()
ordering = dynamic_programming(store, nodes, valid_combine, stats_fn=stats_fn, tiebreaker_fn=tiebreaker_fn, **kwargs)
#import gc
#gc.collect()
return [stream_plan[index] for index in ordering]
##################################################
def reorder_stream_plan(store, stream_plan, algorithm=None, **kwargs):
if not stream_plan:
return stream_plan
stats_from_stream = compute_statistics(stream_plan)
stats = Counter(stats_from_stream.values())
if algorithm is None:
algorithm = 'layer' if len(stats) <= 1 else 'optimal'
if algorithm == 'dummy':
return dummy_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'random':
return random_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'greedy':
return greedy_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'layer':
#print('Heuristic reordering:', stats)
return layer_reorder_stream_plan(stream_plan, **kwargs)
if algorithm == 'optimal':
#print('Optimal reordering:', stats)
return optimal_reorder_stream_plan(store, stream_plan, stats_from_stream, **kwargs)
raise NotImplementedError(algorithm)
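def _example_reorder(store, stream_plan):
    # Illustrative sketch (not part of the original source): force the cheap
    # layer-based heuristic instead of the statistics-driven default.
    return reorder_stream_plan(store, stream_plan, algorithm='layer')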
| 13,469 |
Python
| 44.972696 | 129 | 0.663895 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/reinstantiate.py
|
from hsr_tamp.pddlstream.algorithms.downward import apply_action, get_conjunctive_parts
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_goal_instance
from hsr_tamp.pddlstream.utils import MockSet
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
import pddl
import instantiate
def instantiate_unsatisfiable(state, action, var_mapping, negative_from_name={}):
precondition = []
for effect in action.effects:
if effect.literal.predicate == UNSATISFIABLE:
# Condition must be false for plan to succeed
conditions = set(get_conjunctive_parts(effect.condition))
negative = {literal for literal in conditions if literal.predicate in negative_from_name}
if not negative:
continue
assert len(negative) == 1
# TODO: handle the case where negative is not used (not (CFree ..))
normal_conjunction = pddl.Conjunction(conditions - negative)
# TODO: assumes that can instantiate with just predicate_to_atoms
normal_effect = pddl.Effect(effect.parameters, normal_conjunction, effect.literal)
# TODO: avoid recomputing these
objects_by_type = instantiate.get_objects_by_type([], [])
predicate_to_atoms = instantiate.get_atoms_by_predicate(state)
result = []
normal_effect.instantiate(var_mapping, state, {effect.literal},
objects_by_type, predicate_to_atoms, result)
for _, _, _, mapping in result:
for literal in negative:
new_literal = literal.rename_variables(mapping).negate()
assert (not new_literal.free_variables())
precondition.append(new_literal)
return precondition
def reinstantiate_action(state, instance, negative_from_name={}):
    # Recomputes the instances without any pruned preconditions
# TODO: making the assumption that no negative derived predicates
action = instance.action
var_mapping = instance.var_mapping
init_facts = set()
fluent_facts = MockSet()
precondition = []
try:
action.precondition.instantiate(var_mapping, init_facts, fluent_facts, precondition)
except pddl.conditions.Impossible:
return None
precondition = list(set(precondition)) + instantiate_unsatisfiable(state, action, var_mapping, negative_from_name)
effects = []
effect_from_literal = {literal: (cond, effect, effect_mapping)
for cond, literal, effect, effect_mapping in instance.effect_mappings}
for literal in instance.applied_effects:
cond, effect, effect_mapping = effect_from_literal[literal]
if effect is None: # Stream effect
#effects.append((cond, literal, cond, effect))
continue
else:
effect._instantiate(effect_mapping, init_facts, fluent_facts, effects)
new_effects = []
for cond, effect, e, m in effects:
precondition.extend(cond)
new_effects.append(([], effect, e, m))
return pddl.PropositionalAction(instance.name, precondition, new_effects, instance.cost, action, var_mapping)
def reinstantiate_action_instances(task, old_instances, **kwargs):
    # Recomputes the instances without any pruned preconditions
state = set(task.init)
new_instances = []
for old_instance in old_instances:
# TODO: better way of instantiating conditional effects (when not fluent)
new_instance = reinstantiate_action(state, old_instance, **kwargs)
assert (new_instance is not None)
new_instances.append(new_instance)
apply_action(state, new_instance)
new_instances.append(get_goal_instance(task.goal)) # TODO: move this?
return new_instances
##################################################
def reinstantiate_axiom(old_instance, init_facts=set(), fluent_facts=MockSet()):
axiom = old_instance.axiom
var_mapping = old_instance.var_mapping
new_instance = axiom.instantiate(var_mapping, init_facts, fluent_facts)
assert (new_instance is not None)
return new_instance
def reinstantiate_axiom_instances(old_instances, **kwargs):
return [reinstantiate_axiom(old_instance, **kwargs) for old_instance in old_instances]
| 4,332 |
Python
| 45.591397 | 118 | 0.667359 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/stream_action.py
|
from collections import OrderedDict
from hsr_tamp.pddlstream.algorithms.downward import make_action, make_parameters, make_domain
from hsr_tamp.pddlstream.language.constants import Not
from hsr_tamp.pddlstream.language.conversion import pddl_from_object, substitute_expression
from hsr_tamp.pddlstream.language.statistics import check_effort
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.utils import INF
BOUND_PREDICATE = '_bound'
def enforce_single_binding(result, preconditions, effects):
binding_facts = [(BOUND_PREDICATE, pddl_from_object(out)) for out in result.output_objects]
preconditions.extend(Not(fact) for fact in binding_facts)
effects.extend(fact for fact in binding_facts)
def get_stream_actions(results, unique_binding=False, effort_scale=1, max_effort=INF, **kwargs):
result_from_name = OrderedDict()
stream_actions = []
for result in results:
#if not isinstance(stream_result, StreamResult):
if type(result) == FunctionResult:
continue
effort = result.get_effort(**kwargs)
if not check_effort(effort, max_effort):
continue
name = '{}-{}'.format(result.external.name, len(result_from_name))
#name = '{}_{}_{}'.format(result.external.name, # No spaces & parens
# ','.join(map(pddl_from_object, result.instance.input_objects)),
# ','.join(map(pddl_from_object, result.output_objects)))
assert name not in result_from_name
result_from_name[name] = result
preconditions = list(result.instance.get_domain())
effects = list(result.get_certified()) + [result.stream_fact]
if unique_binding:
enforce_single_binding(result, preconditions, effects)
cost = effort_scale * effort
stream_actions.append(make_action(name, [], preconditions, effects, cost))
return stream_actions, result_from_name
def add_stream_actions(domain, results, **kwargs):
if not results:
return domain, {}
stream_actions, result_from_name = get_stream_actions(results, **kwargs)
output_objects = []
for result in result_from_name.values():
if isinstance(result, StreamResult):
output_objects.extend(map(pddl_from_object, result.output_objects))
new_constants = list(make_parameters(set(output_objects) | set(domain.constants)))
# to_untyped_strips, free_variables
new_domain = make_domain(constants=new_constants, predicates=domain.predicates,
actions=domain.actions[:] + stream_actions, axioms=domain.axioms)
#new_domain = copy.copy(domain)
return new_domain, result_from_name
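def _example_stream_domain(domain, results):
    # Illustrative sketch (not part of the original source): compile stream
    # results into zero-parameter actions whose cost reflects scaled effort,
    # enforcing at most one binding per output object.
    return add_stream_actions(domain, results, unique_binding=True, effort_scale=1)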
| 2,793 |
Python
| 47.172413 | 96 | 0.688149 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/negative.py
|
import time
from hsr_tamp.pddlstream.algorithms.downward import fact_from_fd, plan_preimage, apply_action, \
GOAL_NAME, get_derived_predicates, literal_holds
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axiom_plan
from hsr_tamp.pddlstream.algorithms.scheduling.reinstantiate import reinstantiate_action_instances, reinstantiate_axiom_instances
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl
from hsr_tamp.pddlstream.language.function import Predicate, PredicateResult
from hsr_tamp.pddlstream.language.stream import Stream
from hsr_tamp.pddlstream.utils import safe_zip, INF, elapsed_time
def convert_negative_predicate(negative, literal, step_from_atom, negative_plan):
input_objects = tuple(map(obj_from_pddl, literal.args)) # Might be negative
instance = negative.get_instance(input_objects)
value = not literal.negated
if instance.enumerated:
assert (instance.value == value)
else:
result = PredicateResult(instance, value, optimistic=True)
step = min(step_from_atom[literal]) if result.is_deferrable() else 0
negative_plan[result] = min(step, negative_plan.get(result, INF))
def get_negative_result(negative, input_objects, fluent_facts=frozenset()):
instance = negative.get_instance(input_objects, fluent_facts=fluent_facts)
optimistic = not instance.successful # TODO: clean this up
return instance._Result(instance, output_objects=tuple(), opt_index=instance.opt_index,
call_index=instance.num_calls, optimistic=optimistic)
def convert_negative_stream(negative, literal, step_from_atom, real_states, negative_plan):
import pddl
# assert not negative.is_fluent
fluent_facts_list = []
if negative.is_fluent:
# TODO: ensure that only used once?
for step in step_from_atom[literal]:
fluent_facts_list.append(list(map(fact_from_fd, filter(
lambda f: isinstance(f, pddl.Atom) and (f.predicate in negative.fluents), real_states[step]))))
else:
fluent_facts_list.append(frozenset())
input_objects = tuple(map(obj_from_pddl, literal.args)) # Might be negative
for fluent_facts in fluent_facts_list:
result = get_negative_result(negative, input_objects, fluent_facts)
#if not result.instance.successful: # Doesn't work with reachieve=True
step = min(step_from_atom[literal]) if result.is_deferrable() else 0
negative_plan[result] = min(step, negative_plan.get(result, INF))
def convert_negative(negative_preimage, negative_from_name, step_from_atom, real_states):
negative_plan = {}
for literal in negative_preimage:
negative = negative_from_name[literal.predicate]
if isinstance(negative, Predicate):
convert_negative_predicate(negative, literal, step_from_atom, negative_plan)
elif isinstance(negative, Stream):
convert_negative_stream(negative, literal, step_from_atom, real_states, negative_plan)
else:
raise ValueError(negative)
return negative_plan
##################################################
def recover_negative_axioms(real_task, opt_task, axiom_plans, action_plan, negative_from_name):
start_time = time.time()
action_plan = reinstantiate_action_instances(opt_task, action_plan, negative_from_name=negative_from_name)
# https://github.com/caelan/pddlstream/commit/18b303e19bbab9f8e0016fbb2656f461067e1e94#diff-55454a85485551f9139e20a446b56a83L53
#simplify_conditional_effects(opt_task, action_plan, negative_from_name)
axiom_plans = list(map(reinstantiate_axiom_instances, axiom_plans))
axioms_from_name = get_derived_predicates(opt_task.axioms)
# TODO: could instead just accumulate difference between real and opt
opt_task.init = set(opt_task.init)
real_states = [set(real_task.init)]
num_negative = 0
preimage_plan = []
for axiom_plan, action_instance in safe_zip(axiom_plans, action_plan):
preimage = [l for l in plan_preimage(axiom_plan + [action_instance])
if (l.predicate in axioms_from_name)]
#assert conditions_hold(opt_task.init, conditions)
# TODO: only add derived facts and negative facts to fluent state to make normalizing easier
negative_axiom_plan = extract_axiom_plan(opt_task, preimage, negative_from_name,
static_state=opt_task.init)
#static_state=real_states[-1])
assert negative_axiom_plan is not None
num_negative += len(negative_axiom_plan)
preimage_plan.extend(negative_axiom_plan + axiom_plan + [action_instance])
if action_instance.name != GOAL_NAME:
apply_action(opt_task.init, action_instance)
real_states.append(set(real_states[-1]))
apply_action(real_states[-1], action_instance)
#print('Steps: {} | Negative: {} | Preimage: {} | Time: {:.3f}'.format(
# len(action_plan), num_negative, len(preimage_plan), elapsed_time(start_time)))
return real_states, preimage_plan
| 5,142 |
Python
| 53.712765 | 131 | 0.690393 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/recover_functions.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.utils import INF
from hsr_tamp.pddlstream.language.constants import is_parameter, Head
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl
def extract_function_result(results_from_head, action, pddl_args):
import pddl
if action.cost is None:
return None
# TODO: retrieve constant action costs
# TODO: associate costs with the steps they are applied
expression = action.cost.expression
if not isinstance(expression, pddl.PrimitiveNumericExpression):
return None
var_mapping = {p.name: a for p, a in zip(action.parameters, pddl_args)}
obj_args = tuple(obj_from_pddl(var_mapping[p] if is_parameter(p) else p)
for p in expression.args)
head = Head(expression.symbol, obj_args)
[result] = results_from_head[head]
if result is None:
return None
return result
def compute_function_plan(opt_evaluations, action_plan):
results_from_head = defaultdict(list)
for evaluation, result in opt_evaluations.items():
results_from_head[evaluation.head].append(result)
step_from_function = {}
for step, action_instance in enumerate(action_plan):
action = action_instance.action
if action is None:
continue
args = [action_instance.var_mapping[p.name] for p in action.parameters]
result = extract_function_result(results_from_head, action, args)
if result is not None:
step_from_function[result] = min(step, step_from_function.get(result, INF))
if not result.is_deferrable():
step_from_function[result] = 0
#function_from_instance[action_instance] = result
return step_from_function
| 1,766 |
Python
| 39.15909 | 87 | 0.685164 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/utils.py
|
from hsr_tamp.pddlstream.algorithms.downward import add_predicate, make_predicate, get_literals, fact_from_fd, conditions_hold, \
apply_action, get_derived_predicates
from hsr_tamp.pddlstream.language.constants import And, Not
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.utils import apply_mapping
def partition_results(evaluations, results, apply_now):
applied_results = []
deferred_results = []
opt_evaluations = set(evaluations)
for result in results:
assert(not result.instance.disabled)
assert(not result.instance.enumerated)
domain = set(map(evaluation_from_fact, result.instance.get_domain()))
if isinstance(result, FunctionResult) or (apply_now(result) and (domain <= opt_evaluations)):
applied_results.append(result)
opt_evaluations.update(map(evaluation_from_fact, result.get_certified()))
else:
deferred_results.append(result)
return applied_results, deferred_results
def partition_external_plan(external_plan):
function_plan = list(filter(lambda r: isinstance(r, FunctionResult), external_plan))
stream_plan = list(filter(lambda r: r not in function_plan, external_plan))
return stream_plan, function_plan
def add_unsatisfiable_to_goal(domain, goal_expression):
#return goal_expression
import pddl
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
add_predicate(domain, make_predicate(UNSATISFIABLE, []))
negated_atom = pddl.NegatedAtom(UNSATISFIABLE, tuple())
for action in domain.actions:
if negated_atom not in action.precondition.parts:
action.precondition = pddl.Conjunction([action.precondition, negated_atom]).simplified()
#return goal_expression
return And(goal_expression, Not((UNSATISFIABLE,)))
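# Sketch of the transformation: a goal (and (On a b)) becomes
#   (and (On a b) (not (unsatisfiable)))
# and every action gains (not (unsatisfiable)) as a precondition, so deriving
# UNSATISFIABLE anywhere renders the whole task infeasible for the planner.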
def get_instance_facts(instance, node_from_atom):
# TODO: ignores conditional effect conditions
facts = []
for precondition in get_literals(instance.action.precondition):
if precondition.negated:
continue
args = apply_mapping(precondition.args, instance.var_mapping)
literal = precondition.__class__(precondition.predicate, args)
fact = fact_from_fd(literal)
if fact in node_from_atom:
facts.append(fact)
return facts
| 2,417 |
Python
| 45.499999 | 129 | 0.721142 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/apply_fluents.py
|
import copy
from hsr_tamp.pddlstream.algorithms.downward import fact_from_fd
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders
from hsr_tamp.pddlstream.language.conversion import pddl_from_object
from hsr_tamp.pddlstream.language.object import OptimisticObject, UniqueOptValue
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.utils import neighbors_from_orders, get_mapping, safe_zip
def get_steps_from_stream(stream_plan, step_from_fact, node_from_atom):
steps_from_stream = {}
for result in reversed(stream_plan):
steps_from_stream[result] = set()
for fact in result.get_certified():
if (fact in step_from_fact) and (node_from_atom[fact].result == result):
steps_from_stream[result].update(step_from_fact[fact])
for fact in result.instance.get_domain():
step_from_fact[fact] = step_from_fact.get(fact, set()) | steps_from_stream[result]
# TODO: apply this recursively
return steps_from_stream
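# Illustrative propagation (hypothetical plan): if result r certifies a fact
# needed at steps {2, 4}, then every fact in r's domain inherits {2, 4} too,
# so upstream results discover the set of plan states in which they must hold.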
def get_fluent_instance(external, input_objects, state):
import pddl
fluent_facts = map(fact_from_fd, filter(
lambda f: isinstance(f, pddl.Atom) and (f.predicate in external.fluents), state))
return external.get_instance(input_objects, fluent_facts=fluent_facts)
def convert_fluent_streams(stream_plan, real_states, action_plan, step_from_fact, node_from_atom):
#return stream_plan
import pddl
assert len(real_states) == len(action_plan) + 1
steps_from_stream = get_steps_from_stream(stream_plan, step_from_fact, node_from_atom)
# TODO: ensure that derived facts aren't in fluents?
# TODO: handle case where costs depend on the outputs
_, outgoing_edges = neighbors_from_orders(get_partial_orders(stream_plan, init_facts=map(
fact_from_fd, filter(lambda f: isinstance(f, pddl.Atom), real_states[0]))))
static_plan = []
fluent_plan = []
for result in stream_plan:
external = result.external
if isinstance(result, FunctionResult) or (result.opt_index != 0) or (not external.is_fluent):
static_plan.append(result)
continue
if outgoing_edges[result]:
# No way of taking into account the binding of fluent inputs when preventing cycles
raise NotImplementedError('Fluent stream is required for another stream: {}'.format(result))
#if (len(steps_from_stream[result]) != 1) and result.output_objects:
# raise NotImplementedError('Fluent stream required in multiple states: {}'.format(result))
for state_index in steps_from_stream[result]:
new_output_objects = [
#OptimisticObject.from_opt(out.value, object())
OptimisticObject.from_opt(out.value, UniqueOptValue(result.instance, object(), name))
for name, out in safe_zip(result.external.outputs, result.output_objects)]
if new_output_objects and (state_index <= len(action_plan) - 1):
# TODO: check that the objects aren't used in any effects
instance = copy.copy(action_plan[state_index])
action_plan[state_index] = instance
output_mapping = get_mapping(list(map(pddl_from_object, result.output_objects)),
list(map(pddl_from_object, new_output_objects)))
instance.var_mapping = {p: output_mapping.get(v, v)
for p, v in instance.var_mapping.items()}
new_instance = get_fluent_instance(external, result.instance.input_objects, real_states[state_index])
# TODO: handle optimistic here
new_result = new_instance.get_result(new_output_objects, opt_index=result.opt_index)
fluent_plan.append(new_result)
return static_plan + fluent_plan
| 3,880 |
Python
| 56.073529 | 113 | 0.665206 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/recover_streams.py
|
from collections import namedtuple, defaultdict
from heapq import heappop, heappush
from hsr_tamp.pddlstream.language.conversion import is_negated_atom, fact_from_evaluation, evaluation_from_fact
from hsr_tamp.pddlstream.language.statistics import check_effort
from hsr_tamp.pddlstream.utils import HeapElement, INF, implies
Node = namedtuple('Node', ['effort', 'result']) # TODO: include level
EFFORT_OP = sum # max | sum
NULL_COND = (None,)
def get_achieving_streams(evaluations, stream_results, max_effort=INF, **effort_args):
unprocessed_from_atom = defaultdict(list)
node_from_atom = {NULL_COND: Node(0, None)}
conditions_from_stream = {}
remaining_from_stream = {}
for result in stream_results:
conditions_from_stream[result] = result.instance.get_domain() + (NULL_COND,)
remaining_from_stream[result] = len(conditions_from_stream[result])
for atom in conditions_from_stream[result]:
unprocessed_from_atom[atom].append(result)
for atom in evaluations:
if not is_negated_atom(atom):
node_from_atom[fact_from_evaluation(atom)] = Node(0, None)
queue = [HeapElement(node.effort, atom) for atom, node in node_from_atom.items()]
while queue:
atom = heappop(queue).value
if atom not in unprocessed_from_atom:
continue
for result in unprocessed_from_atom[atom]:
remaining_from_stream[result] -= 1
if remaining_from_stream[result]:
continue
effort = result.get_effort(**effort_args)
total_effort = effort + EFFORT_OP(
node_from_atom[cond].effort for cond in conditions_from_stream[result])
if (max_effort is not None) and (max_effort <= total_effort):
continue
for new_atom in result.get_certified():
if (new_atom not in node_from_atom) or (total_effort < node_from_atom[new_atom].effort):
node_from_atom[new_atom] = Node(total_effort, result)
heappush(queue, HeapElement(total_effort, new_atom))
del unprocessed_from_atom[atom]
del node_from_atom[NULL_COND]
return node_from_atom
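# Minimal standalone sketch of the relaxation above, using toy tuples instead of
# pddlstream types (all names here are illustrative). Each toy stream is
# (effort, domain_facts, certified_facts) and efforts combine additively,
# mirroring EFFORT_OP = sum.
def _toy_achieving_efforts(init_facts, toy_streams):
    streams_from_fact = defaultdict(list)
    remaining = [len(domain) for _, domain, _ in toy_streams]
    for index, (_, domain, _) in enumerate(toy_streams):
        for fact in domain:
            streams_from_fact[fact].append(index)
    effort_from_fact = {fact: 0 for fact in init_facts}
    queue = [(0, fact) for fact in init_facts]
    processed = set()
    while queue:
        _, fact = heappop(queue)
        if fact in processed: # Skip stale queue entries
            continue
        processed.add(fact)
        for index in streams_from_fact[fact]:
            remaining[index] -= 1
            if remaining[index]: # Not all domain facts achieved yet
                continue
            effort, domain, certified = toy_streams[index]
            total = effort + sum(effort_from_fact[f] for f in domain)
            for new_fact in certified:
                if total < effort_from_fact.get(new_fact, INF):
                    effort_from_fact[new_fact] = total
                    heappush(queue, (total, new_fact))
    return effort_from_fact
# e.g. _toy_achieving_efforts({'a'}, [(1, ['a'], ['b']), (2, ['b'], ['c'])])
# returns {'a': 0, 'b': 1, 'c': 3}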
def evaluations_from_stream_plan(evaluations, stream_results, max_effort=INF):
opt_evaluations = set(evaluations)
for result in stream_results:
if result.instance.disabled or result.instance.enumerated:
raise RuntimeError(result)
domain = set(map(evaluation_from_fact, result.instance.get_domain()))
assert(domain <= opt_evaluations)
opt_evaluations.update(map(evaluation_from_fact, result.get_certified()))
node_from_atom = get_achieving_streams(evaluations, stream_results)
result_from_evaluation = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()
if check_effort(n.effort, max_effort)}
return result_from_evaluation
def extract_stream_plan(node_from_atom, target_facts, stream_plan):
# TODO: prune with rules
# TODO: linearization that takes into account satisfied goals at each level
# TODO: can optimize for all streams & axioms all at once
for fact in target_facts:
if fact not in node_from_atom:
raise RuntimeError('Preimage fact {} is not achievable!'.format(fact))
#RuntimeError: Preimage fact ('new-axiom@0',) is not achievable!
result = node_from_atom[fact].result
if result is None:
continue
extract_stream_plan(node_from_atom, result.instance.get_domain(), stream_plan)
if result not in stream_plan:
# TODO: dynamic programming version that doesn't reconsider facts
# TODO: don't add if the fact is already satisfied
stream_plan.append(result)
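# Illustrative trace (hypothetical streams): if node_from_atom maps
#   (Pose p) -> Node(1, sample_pose) and (Grasp g) -> Node(1, sample_grasp),
# then extracting targets [(Pose p), (Grasp g)] appends sample_pose and
# sample_grasp once each, recursing through their domain facts first.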
| 3,800 |
Python
| 48.363636 | 111 | 0.662632 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/plan_streams.py
|
from __future__ import print_function
import copy
from collections import defaultdict, namedtuple
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem, get_cost_scale, \
conditions_hold, apply_action, scale_cost, fd_from_fact, make_domain, make_predicate, evaluation_from_fd, \
plan_preimage, fact_from_fd, USE_FORBID, pddl_from_instance, parse_action
from hsr_tamp.pddlstream.algorithms.instantiate_task import instantiate_task, sas_from_instantiated, FD_INSTANTIATE
from hsr_tamp.pddlstream.algorithms.scheduling.add_optimizers import add_optimizer_effects, \
using_optimizers, recover_simultaneous
from hsr_tamp.pddlstream.algorithms.scheduling.apply_fluents import convert_fluent_streams
from hsr_tamp.pddlstream.algorithms.scheduling.negative import recover_negative_axioms, convert_negative
from hsr_tamp.pddlstream.algorithms.scheduling.postprocess import postprocess_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import recover_axioms_plans
from hsr_tamp.pddlstream.algorithms.scheduling.recover_functions import compute_function_plan
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan, \
evaluations_from_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.stream_action import add_stream_actions
from hsr_tamp.pddlstream.algorithms.scheduling.utils import partition_results, \
add_unsatisfiable_to_goal, get_instance_facts
from hsr_tamp.pddlstream.algorithms.search import solve_from_task
from hsr_tamp.pddlstream.algorithms.advanced import UNIVERSAL_TO_CONDITIONAL
from hsr_tamp.pddlstream.language.constants import Not, get_prefix, EQ, FAILED, OptPlan, Action
from hsr_tamp.pddlstream.language.conversion import obj_from_pddl_plan, evaluation_from_fact, \
fact_from_evaluation, transform_plan_args, transform_action_args, obj_from_pddl
from hsr_tamp.pddlstream.language.external import Result
from hsr_tamp.pddlstream.language.exogenous import get_fluent_domain
from hsr_tamp.pddlstream.language.function import Function
from hsr_tamp.pddlstream.language.stream import StreamResult
from hsr_tamp.pddlstream.language.optimizer import UNSATISFIABLE
from hsr_tamp.pddlstream.language.statistics import compute_plan_effort
from hsr_tamp.pddlstream.language.temporal import SimplifiedDomain, solve_tfd
from hsr_tamp.pddlstream.language.write_pddl import get_problem_pddl
from hsr_tamp.pddlstream.language.object import Object
from hsr_tamp.pddlstream.utils import Verbose, INF, topological_sort, get_ancestors
RENAME_ACTIONS = True
#RENAME_ACTIONS = not USE_FORBID
OptSolution = namedtuple('OptSolution', ['stream_plan', 'opt_plan', 'cost']) # TODO: move to the below
#OptSolution = namedtuple('OptSolution', ['stream_plan', 'action_plan', 'cost', 'supporting_facts', 'axiom_plan'])
##################################################
def add_stream_efforts(node_from_atom, instantiated, effort_weight, **kwargs):
if effort_weight is None:
return
# TODO: make effort just a multiplier (or relative) to avoid worrying about the scale
# TODO: regularize & normalize across the problem?
#efforts = []
for instance in instantiated.actions:
# TODO: prune stream actions here?
# TODO: round each effort individually to penalize multiple streams
facts = get_instance_facts(instance, node_from_atom)
#effort = COMBINE_OP([0] + [node_from_atom[fact].effort for fact in facts])
stream_plan = []
extract_stream_plan(node_from_atom, facts, stream_plan)
effort = compute_plan_effort(stream_plan, **kwargs)
instance.cost += scale_cost(effort_weight*effort)
# TODO: store whether it uses shared/unique outputs and prune too expensive streams
#efforts.append(effort)
#print(min(efforts), efforts)
##################################################
def rename_instantiated_actions(instantiated, rename):
# TODO: rename SAS instead?
actions = instantiated.actions[:]
renamed_actions = []
action_from_name = {}
for i, action in enumerate(actions):
renamed_actions.append(copy.copy(action))
renamed_name = 'a{}'.format(i) if rename else action.name
renamed_actions[-1].name = '({})'.format(renamed_name)
action_from_name[renamed_name] = action # Change reachable_action_params?
instantiated.actions[:] = renamed_actions
return action_from_name
##################################################
def get_plan_cost(action_plan, cost_from_action):
if action_plan is None:
return INF
# TODO: return cost per action instance
#return sum([0.] + [instance.cost for instance in action_plan])
scaled_cost = sum([0.] + [cost_from_action[instance] for instance in action_plan])
return scaled_cost / get_cost_scale()
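# Illustrative (hypothetical instances as dict keys): with get_cost_scale() == 1,
#   get_plan_cost([a1, a2], {a1: 2., a2: 3.}) == 5.
# The division undoes the scaling applied when costs were converted via scale_cost.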
def instantiate_optimizer_axioms(instantiated, domain, results):
# Needed for instantiating axioms before adding stream action effects
# Otherwise, FastDownward will prune these unreachable axioms
# TODO: compute this first and then apply the eager actions
stream_init = {fd_from_fact(result.stream_fact)
for result in results if isinstance(result, StreamResult)}
evaluations = list(map(evaluation_from_fd, stream_init | instantiated.atoms))
temp_domain = make_domain(predicates=[make_predicate(UNSATISFIABLE, [])],
axioms=[ax for ax in domain.axioms if ax.name == UNSATISFIABLE])
temp_problem = get_problem(evaluations, Not((UNSATISFIABLE,)), temp_domain)
# TODO: UNSATISFIABLE might be in atoms making the goal always infeasible
with Verbose():
# TODO: the FastDownward instantiation prunes static preconditions
use_fd = False if using_optimizers(results) else FD_INSTANTIATE
new_instantiated = instantiate_task(task_from_domain_problem(temp_domain, temp_problem),
use_fd=use_fd, check_infeasible=False, prune_static=False)
assert new_instantiated is not None
instantiated.axioms.extend(new_instantiated.axioms)
instantiated.atoms.update(new_instantiated.atoms)
##################################################
def recover_partial_orders(stream_plan, node_from_atom):
# Useful to recover the correct DAG
partial_orders = set()
for child in stream_plan:
# TODO: account for fluent objects
for fact in child.get_domain():
parent = node_from_atom[fact].result
if parent is not None:
partial_orders.add((parent, child))
#stream_plan = topological_sort(stream_plan, partial_orders)
return partial_orders
def recover_stream_plan(evaluations, current_plan, opt_evaluations, goal_expression, domain, node_from_atom,
action_plan, axiom_plans, negative, replan_step):
# Universally quantified conditions are converted into negative axioms
# Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by taking the Cartesian product of types (slow)
# Added effects cancel out removed effects
# TODO: node_from_atom is a subset of opt_evaluations (only missing functions)
real_task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain))
opt_task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain))
negative_from_name = {external.blocked_predicate: external for external in negative if external.is_negated}
real_states, full_plan = recover_negative_axioms(
real_task, opt_task, axiom_plans, action_plan, negative_from_name)
function_plan = compute_function_plan(opt_evaluations, action_plan)
full_preimage = plan_preimage(full_plan, []) # Does not contain the stream preimage!
negative_preimage = set(filter(lambda a: a.predicate in negative_from_name, full_preimage))
negative_plan = convert_negative(negative_preimage, negative_from_name, full_preimage, real_states)
function_plan.update(negative_plan)
# TODO: OrderedDict for these plans
# TODO: this assumes that actions do not negate preimage goals
positive_preimage = {l for l in (set(full_preimage) - real_states[0] - negative_preimage) if not l.negated}
steps_from_fact = {fact_from_fd(l): full_preimage[l] for l in positive_preimage}
last_from_fact = {fact: min(steps) for fact, steps in steps_from_fact.items() if get_prefix(fact) != EQ}
#stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
# visualize_constraints(map(fact_from_fd, target_facts))
for result, step in function_plan.items():
for fact in result.get_domain():
last_from_fact[fact] = min(step, last_from_fact.get(fact, INF))
# TODO: get_steps_from_stream
stream_plan = []
last_from_stream = dict(function_plan)
for result in current_plan: # + negative_plan?
# TODO: actually compute when these are needed + dependencies
last_from_stream[result] = 0
if isinstance(result.external, Function) or (result.external in negative):
if len(action_plan) > replan_step:
raise NotImplementedError() # TODO: deferring negated optimizers
# Prevents these results from being pruned
function_plan[result] = replan_step
else:
stream_plan.append(result)
curr_evaluations = evaluations_from_stream_plan(evaluations, stream_plan, max_effort=None)
extraction_facts = set(last_from_fact) - set(map(fact_from_evaluation, curr_evaluations))
extract_stream_plan(node_from_atom, extraction_facts, stream_plan)
# Recomputing due to postprocess_stream_plan
stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan, last_from_fact)
node_from_atom = get_achieving_streams(evaluations, stream_plan, max_effort=None)
fact_sequence = [set(result.get_domain()) for result in stream_plan] + [extraction_facts]
    for facts in reversed(fact_sequence): # Bellman-Ford-style relaxation
for fact in facts: # could flatten instead
result = node_from_atom[fact].result
if result is None:
continue
step = last_from_fact[fact] if result.is_deferrable() else 0
last_from_stream[result] = min(step, last_from_stream.get(result, INF))
for domain_fact in result.instance.get_domain():
last_from_fact[domain_fact] = min(last_from_stream[result], last_from_fact.get(domain_fact, INF))
stream_plan.extend(function_plan)
partial_orders = recover_partial_orders(stream_plan, node_from_atom)
bound_objects = set()
for result in stream_plan:
if (last_from_stream[result] == 0) or not result.is_deferrable(bound_objects=bound_objects):
for ancestor in get_ancestors(result, partial_orders) | {result}:
# TODO: this might change descendants of ancestor. Perform in a while loop.
last_from_stream[ancestor] = 0
if isinstance(ancestor, StreamResult):
bound_objects.update(out for out in ancestor.output_objects if out.is_unique())
#local_plan = [] # TODO: not sure what this was for
#for fact, step in sorted(last_from_fact.items(), key=lambda pair: pair[1]): # Earliest to latest
# print(step, fact)
# extract_stream_plan(node_from_atom, [fact], local_plan, last_from_fact, last_from_stream)
# Each stream has an earliest evaluation time
# When computing the latest, use 0 if something isn't deferred
# Evaluate each stream as soon as possible
# Option to defer streams after a point in time?
# TODO: action costs for streams that encode uncertainty
state = set(real_task.init)
remaining_results = list(stream_plan)
first_from_stream = {}
#assert 1 <= replan_step # Plan could be empty
for step, instance in enumerate(action_plan):
for result in list(remaining_results):
# TODO: could do this more efficiently if need be
domain = result.get_domain() + get_fluent_domain(result)
if conditions_hold(state, map(fd_from_fact, domain)):
remaining_results.remove(result)
certified = {fact for fact in result.get_certified() if get_prefix(fact) != EQ}
state.update(map(fd_from_fact, certified))
if step != 0:
first_from_stream[result] = step
# TODO: assumes no fluent axiom domain conditions
apply_action(state, instance)
#assert not remaining_results # Not true if retrace
if first_from_stream:
replan_step = min(replan_step, *first_from_stream.values())
eager_plan = []
results_from_step = defaultdict(list)
for result in stream_plan:
earliest_step = first_from_stream.get(result, 0) # exogenous
latest_step = last_from_stream.get(result, 0) # defer
#assert earliest_step <= latest_step
defer = replan_step <= latest_step
if not defer:
eager_plan.append(result)
# We only perform a deferred evaluation if it has all deferred dependencies
# TODO: make a flag that also allows dependencies to be deferred
future = (earliest_step != 0) or defer
if future:
future_step = latest_step if defer else earliest_step
results_from_step[future_step].append(result)
# TODO: some sort of obj side-effect bug that requires obj_from_pddl to be applied last (likely due to fluent streams)
eager_plan = convert_fluent_streams(eager_plan, real_states, action_plan, steps_from_fact, node_from_atom)
combined_plan = []
for step, action in enumerate(action_plan):
combined_plan.extend(result.get_action() for result in results_from_step[step])
combined_plan.append(transform_action_args(pddl_from_instance(action), obj_from_pddl))
# TODO: the returned facts have the same side-effect bug as above
# TODO: annotate when each preimage fact is used
preimage_facts = {fact_from_fd(l) for l in full_preimage if (l.predicate != EQ) and not l.negated}
for negative_result in negative_plan: # TODO: function_plan
preimage_facts.update(negative_result.get_certified())
for result in eager_plan:
preimage_facts.update(result.get_domain())
# Might not be able to regenerate facts involving the outputs of streams
preimage_facts.update(result.get_certified()) # Some facts might not be in the preimage
# TODO: record streams and axioms
return eager_plan, OptPlan(combined_plan, preimage_facts)
##################################################
def solve_optimistic_temporal(domain, stream_domain, applied_results, all_results,
opt_evaluations, node_from_atom, goal_expression,
effort_weight, debug=False, **kwargs):
# TODO: assert that the unused parameters are off
assert domain is stream_domain
#assert len(applied_results) == len(all_results)
problem = get_problem(opt_evaluations, goal_expression, domain)
with Verbose():
instantiated = instantiate_task(task_from_domain_problem(domain, problem))
if instantiated is None:
return instantiated, None, None, INF
problem = get_problem_pddl(opt_evaluations, goal_expression, domain.pddl)
pddl_plan, makespan = solve_tfd(domain.pddl, problem, debug=debug, **kwargs)
if pddl_plan is None:
return instantiated, None, pddl_plan, makespan
instance_from_action_args = defaultdict(list)
for instance in instantiated.actions:
name, args = parse_action(instance)
instance_from_action_args[name, args].append(instance)
#instance.action, instance.var_mapping
action_instances = []
for action in pddl_plan:
instances = instance_from_action_args[action.name, action.args]
if len(instances) != 1:
            for instance in instances: # Renamed to avoid shadowing the plan action being matched
                instance.dump()
#assert len(instances) == 1 # TODO: support 2 <= case
action_instances.append(instances[0])
temporal_plan = obj_from_pddl_plan(pddl_plan) # pddl_plan is sequential
return instantiated, action_instances, temporal_plan, makespan
def solve_optimistic_sequential(domain, stream_domain, applied_results, all_results,
opt_evaluations, node_from_atom, goal_expression,
effort_weight, debug=False, **kwargs):
#print(sorted(map(fact_from_evaluation, opt_evaluations)))
temporal_plan = None
problem = get_problem(opt_evaluations, goal_expression, stream_domain) # begin_metric
with Verbose(verbose=debug):
task = task_from_domain_problem(stream_domain, problem)
instantiated = instantiate_task(task)
if instantiated is None:
return instantiated, None, temporal_plan, INF
cost_from_action = {action: action.cost for action in instantiated.actions}
add_stream_efforts(node_from_atom, instantiated, effort_weight)
if using_optimizers(applied_results):
add_optimizer_effects(instantiated, node_from_atom)
# TODO: reachieve=False when using optimizers or should add applied facts
instantiate_optimizer_axioms(instantiated, domain, all_results)
action_from_name = rename_instantiated_actions(instantiated, RENAME_ACTIONS)
# TODO: the action unsatisfiable conditions are pruned
with Verbose(debug):
sas_task = sas_from_instantiated(instantiated)
#sas_task.metric = task.use_min_cost_metric
sas_task.metric = True
# TODO: apply renaming to hierarchy as well
# solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
renamed_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
if renamed_plan is None:
return instantiated, None, temporal_plan, INF
action_instances = [action_from_name[name if RENAME_ACTIONS else '({} {})'.format(name, ' '.join(args))]
for name, args in renamed_plan]
cost = get_plan_cost(action_instances, cost_from_action)
return instantiated, action_instances, temporal_plan, cost
##################################################
def plan_streams(evaluations, goal_expression, domain, all_results, negative, effort_weight, max_effort,
simultaneous=False, reachieve=True, replan_actions=set(), **kwargs):
# TODO: alternatively could translate with stream actions on real opt_state and just discard them
# TODO: only consider axioms that have stream conditions?
#reachieve = reachieve and not using_optimizers(all_results)
#for i, result in enumerate(all_results):
# print(i, result, result.get_effort())
applied_results, deferred_results = partition_results(
evaluations, all_results, apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
stream_domain, deferred_from_name = add_stream_actions(domain, deferred_results)
if reachieve and not using_optimizers(all_results):
achieved_results = {n.result for n in evaluations.values() if isinstance(n.result, Result)}
init_evaluations = {e for e, n in evaluations.items() if n.result not in achieved_results}
applied_results = achieved_results | set(applied_results)
evaluations = init_evaluations # For clarity
# TODO: could iteratively increase max_effort
node_from_atom = get_achieving_streams(evaluations, applied_results, # TODO: apply to all_results?
max_effort=max_effort)
opt_evaluations = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()}
if UNIVERSAL_TO_CONDITIONAL or using_optimizers(all_results):
goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)
temporal = isinstance(stream_domain, SimplifiedDomain)
optimistic_fn = solve_optimistic_temporal if temporal else solve_optimistic_sequential
instantiated, action_instances, temporal_plan, cost = optimistic_fn(
domain, stream_domain, applied_results, all_results, opt_evaluations,
node_from_atom, goal_expression, effort_weight, **kwargs)
if action_instances is None:
return OptSolution(FAILED, FAILED, cost)
action_instances, axiom_plans = recover_axioms_plans(instantiated, action_instances)
# TODO: extract out the minimum set of conditional effects that are actually required
#simplify_conditional_effects(instantiated.task, action_instances)
stream_plan, action_instances = recover_simultaneous(
applied_results, negative, deferred_from_name, action_instances)
action_plan = transform_plan_args(map(pddl_from_instance, action_instances), obj_from_pddl)
replan_step = min([step+1 for step, action in enumerate(action_plan)
if action.name in replan_actions] or [len(action_plan)+1]) # step after action application
stream_plan, opt_plan = recover_stream_plan(evaluations, stream_plan, opt_evaluations, goal_expression, stream_domain,
node_from_atom, action_instances, axiom_plans, negative, replan_step)
if temporal_plan is not None:
# TODO: handle deferred streams
assert all(isinstance(action, Action) for action in opt_plan.action_plan)
opt_plan.action_plan[:] = temporal_plan
return OptSolution(stream_plan, opt_plan, cost)
| 21,538 |
Python
| 55.091146 | 122 | 0.693611 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/postprocess.py
|
from hsr_tamp.pddlstream.algorithms.downward import get_problem, task_from_domain_problem
from hsr_tamp.pddlstream.algorithms.instantiate_task import sas_from_pddl
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.stream_action import get_stream_actions
from hsr_tamp.pddlstream.algorithms.scheduling.utils import add_unsatisfiable_to_goal
from hsr_tamp.pddlstream.algorithms.search import solve_from_task
from hsr_tamp.pddlstream.language.constants import And
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.utils import flatten, INF
# TODO: rename this to plan streams?
DO_RESCHEDULE = False
#RESCHEDULE_PLANNER = 'ff-astar'
RESCHEDULE_PLANNER = 'lmcut-astar'
#RESCHEDULE_PLANNER = 'ff-lazy'
def reschedule_stream_plan(evaluations, target_facts, domain, stream_results,
unique_binding=False, unsatisfiable=False, max_effort=INF,
planner=RESCHEDULE_PLANNER, max_reschedule_time=10, debug=False):
# TODO: search in space of partially ordered plans
# TODO: constrain selection order to be alphabetical?
domain.actions[:], stream_result_from_name = get_stream_actions(
stream_results, unique_binding=unique_binding)
goal_expression = And(*target_facts)
if unsatisfiable: # TODO: ensure that the copy hasn't harmed anything
goal_expression = add_unsatisfiable_to_goal(domain, goal_expression)
reschedule_problem = get_problem(evaluations, goal_expression, domain, unit_costs=False)
reschedule_task = task_from_domain_problem(domain, reschedule_problem)
#reschedule_task.axioms = [] # TODO: ensure that the constants are added in the event that axioms are needed?
sas_task = sas_from_pddl(reschedule_task)
stream_names, effort = solve_from_task(sas_task, planner=planner, max_planner_time=max_reschedule_time,
max_cost=max_effort, debug=debug)
if stream_names is None:
return None
stream_plan = [stream_result_from_name[name] for name, _ in stream_names]
return stream_plan
##################################################
def shorten_stream_plan(evaluations, stream_plan, target_facts):
all_subgoals = set(target_facts) | set(flatten(r.instance.get_domain() for r in stream_plan))
evaluation_subgoals = set(filter(evaluations.__contains__, map(evaluation_from_fact, all_subgoals)))
open_subgoals = set(filter(lambda f: evaluation_from_fact(f) not in evaluations, all_subgoals))
results_from_fact = {}
for result in stream_plan:
for fact in result.get_certified():
results_from_fact.setdefault(fact, []).append(result)
for removed_result in reversed(stream_plan): # TODO: only do in order?
certified_subgoals = open_subgoals & set(removed_result.get_certified())
if not certified_subgoals: # Could combine with following
new_stream_plan = stream_plan[:]
new_stream_plan.remove(removed_result)
return new_stream_plan
if all(2 <= len(results_from_fact[fact]) for fact in certified_subgoals):
node_from_atom = get_achieving_streams(evaluation_subgoals, set(stream_plan) - {removed_result})
if all(fact in node_from_atom for fact in target_facts):
new_stream_plan = []
extract_stream_plan(node_from_atom, target_facts, new_stream_plan)
return new_stream_plan
return None
def prune_stream_plan(evaluations, stream_plan, target_facts):
while True:
new_stream_plan = shorten_stream_plan(evaluations, stream_plan, target_facts)
if new_stream_plan is None:
break
stream_plan = new_stream_plan
return stream_plan
##################################################
def postprocess_stream_plan(evaluations, domain, stream_plan, target_facts):
stream_plan = prune_stream_plan(evaluations, stream_plan, target_facts)
if DO_RESCHEDULE:
# TODO: detect this based on unique or not
# TODO: maybe test if partial order between two ways of achieving facts, if not prune
new_stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_plan)
if new_stream_plan is not None:
return new_stream_plan
return stream_plan
| 4,434 |
Python
| 51.797618 | 113 | 0.690347 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/recover_axioms.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.algorithms.downward import get_literals, apply_action, \
get_derived_predicates, literal_holds, GOAL_NAME, get_precondition
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_goal_instance, filter_negated, get_achieving_axioms
from hsr_tamp.pddlstream.language.constants import is_parameter
from hsr_tamp.pddlstream.utils import Verbose, MockSet, safe_zip, flatten
import copy
import pddl
import axiom_rules
def get_necessary_axioms(conditions, axioms, negative_from_name):
if not conditions or not axioms:
return {}
axioms_from_name = get_derived_predicates(axioms)
atom_queue = []
processed_atoms = set()
def add_literals(literals):
for lit in literals:
atom = lit.positive()
if atom not in processed_atoms:
atom_queue.append(atom) # Previously was lit.positive() for some reason?
processed_atoms.add(atom)
add_literals(conditions)
axiom_from_action = {}
partial_instantiations = set()
while atom_queue:
literal = atom_queue.pop()
for axiom in axioms_from_name[literal.predicate]:
derived_parameters = axiom.parameters[:axiom.num_external_parameters]
var_mapping = {p.name: a for p, a in zip(derived_parameters, literal.args) if not is_parameter(a)}
key = (axiom, frozenset(var_mapping.items()))
if key in partial_instantiations:
continue
partial_instantiations.add(key)
parts = [l.rename_variables(var_mapping) for l in get_literals(axiom.condition)
if l.predicate not in negative_from_name] # Assumes a conjunction?
# new_condition = axiom.condition.uniquify_variables(None, var_mapping)
effect_args = [var_mapping.get(a.name, a.name) for a in derived_parameters]
effect = pddl.Effect([], pddl.Truth(), pddl.conditions.Atom(axiom.name, effect_args))
free_parameters = [p for p in axiom.parameters if p.name not in var_mapping]
new_action = pddl.Action(axiom.name, free_parameters, len(free_parameters),
pddl.Conjunction(parts), [effect], None)
# Creating actions so I can partially instantiate (impossible with axioms)
axiom_from_action[new_action] = (axiom, var_mapping)
add_literals(parts)
return axiom_from_action
##################################################
def instantiate_necessary_axioms(model, static_facts, fluent_facts, axiom_remap={}):
instantiated_axioms = []
for atom in model:
if isinstance(atom.predicate, pddl.Action):
action = atom.predicate
var_mapping = {p.name: a for p, a in zip(action.parameters, atom.args)}
axiom, existing_var_mapping = axiom_remap[action]
var_mapping.update(existing_var_mapping)
inst_axiom = axiom.instantiate(var_mapping, static_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
return instantiated_axioms
##################################################
def extract_axioms(state, axiom_from_atom, conditions, axiom_plan, negated_from_name={}):
success = True
for fact in filter_negated(conditions, negated_from_name):
if literal_holds(state, fact):
continue
if fact not in axiom_from_atom:
print('Fact is not achievable:', fact)
success = False
continue
axiom = axiom_from_atom[fact]
if (axiom is None) or (axiom in axiom_plan):
continue
extract_axioms(state, axiom_from_atom, axiom.condition, axiom_plan, negated_from_name=negated_from_name)
axiom_plan.append(axiom)
return success
##################################################
def is_useful_atom(atom, conditions_from_predicate):
# TODO: this is currently a bottleneck. Instantiate for all actions along the plan first? (apply before checking)
if not isinstance(atom, pddl.Atom):
return False
for atom2 in conditions_from_predicate[atom.predicate]:
if all(is_parameter(a2) or (a1 == a2) for a1, a2 in safe_zip(atom.args, atom2.args)):
return True
return False
def extraction_helper(state, instantiated_axioms, goals, negative_from_name={}):
# TODO: filter instantiated_axioms that aren't applicable?
import options
with Verbose(False):
# axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(actions, axioms, goals)
all_axioms, axiom_layers = axiom_rules.handle_axioms(
operators=[], axioms=instantiated_axioms, goals=goals, layer_strategy=options.layer_strategy)
axiom_init = set() # TODO: new FastDownward does not use axiom_init
helpful_axioms = []
for axiom in all_axioms:
if axiom.effect in goals: # TODO: double check this
helpful_axioms.append(axiom)
init_atom = axiom.effect.negate()
if axiom.effect in axiom_init:
raise RuntimeError('Bug introduced by new "downward" where both the positive and negative atoms '
'of literal {} are in the initial state'.format(init_atom.positive()))
axiom_init.add(init_atom)
axiom_effects = {axiom.effect for axiom in helpful_axioms}
#assert len(axiom_effects) == len(axiom_init)
for pre in list(goals) + list(axiom_effects):
if pre.positive() not in axiom_init:
axiom_init.add(pre.positive().negate())
goal_action = pddl.PropositionalAction(GOAL_NAME, goals, [], None)
axiom_from_atom, _ = get_achieving_axioms(state | axiom_init, helpful_axioms + [goal_action], negative_from_name)
axiom_plan = [] # Could always add all conditions
success = extract_axioms(state | axiom_init, axiom_from_atom, goals, axiom_plan, negative_from_name)
if not success:
print('Warning! Could not extract an axiom plan')
#return None
return axiom_plan
def extract_axiom_plan(task, goals, negative_from_name, static_state=set()):
import pddl_to_prolog
import build_model
import instantiate
# TODO: only reinstantiate the negative axioms
if not negative_from_name:
return []
axioms_from_name = get_derived_predicates(task.axioms)
derived_goals = {l for l in goals if l.predicate in axioms_from_name}
assert all(literal_holds(task.init, l) # or (l.predicate in negative_from_name)
for l in set(goals) - derived_goals)
axiom_from_action = get_necessary_axioms(derived_goals, task.axioms, negative_from_name)
if not axiom_from_action:
return []
conditions_from_predicate = defaultdict(set)
for axiom, mapping in axiom_from_action.values():
for literal in get_literals(axiom.condition):
conditions_from_predicate[literal.predicate].add(literal.rename_variables(mapping))
original_init = task.init
original_actions = task.actions
original_axioms = task.axioms
# TODO: retrieve initial state based on if helpful
task.init = {atom for atom in task.init if is_useful_atom(atom, conditions_from_predicate)}
# TODO: store map from predicate to atom
task.actions = axiom_from_action.keys()
task.axioms = []
# TODO: maybe it would just be better to drop the negative throughout this process until this end
with Verbose(verbose=False):
model = build_model.compute_model(pddl_to_prolog.translate(task)) # Changes based on init
opt_facts = instantiate.get_fluent_facts(task, model) | (task.init - static_state)
mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
instantiated_axioms = instantiate_necessary_axioms(model, static_state, mock_fluent, axiom_from_action)
axiom_plan = extraction_helper(task.init, instantiated_axioms, derived_goals, negative_from_name)
task.init = original_init
task.actions = original_actions
task.axioms = original_axioms
return axiom_plan
##################################################
def backtrack_axioms(conditions, axioms_from_effect, visited_atoms):
visited_axioms = []
for atom in conditions:
if atom in visited_atoms:
continue
visited_atoms.add(atom)
for axiom in axioms_from_effect[atom]:
visited_axioms.append(axiom)
visited_axioms.extend(backtrack_axioms(axiom.condition, axioms_from_effect, visited_atoms))
return visited_axioms
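# Sketch (hypothetical axioms): with axioms_from_effect = {d1: [ax1]} and
# ax1.condition = [d2], backtrack_axioms([d1], ..., set()) collects ax1 plus
# anything deriving d2, i.e. the transitive support of the given conditions.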
def recover_axioms_plans(instantiated, action_instances):
#axioms, axiom_init, _ = axiom_rules.handle_axioms(
# instantiated.actions, instantiated.axioms, instantiated.goal_list)
new_action_instances = [copy.deepcopy(instance) for instance in action_instances]
axioms, axiom_init = instantiated.axioms, [] # TODO: bug when needing to reachieve negated
axioms_from_effect = defaultdict(list)
for axiom in axioms:
axioms_from_effect[axiom.effect].append(axiom)
axioms_from_name = get_derived_predicates(instantiated.task.axioms)
state = set(instantiated.task.init) | set(axiom_init)
axiom_plans = []
for action in new_action_instances + [get_goal_instance(instantiated.task.goal)]:
all_conditions = list(get_precondition(action)) + list(flatten(
cond for cond, _ in action.add_effects + action.del_effects))
axioms = backtrack_axioms(all_conditions, axioms_from_effect, set())
axiom_from_atom, _ = get_achieving_axioms(state, axioms)
action.applied_effects = []
for effects in [action.add_effects, action.del_effects]:
negate = (effects is action.del_effects)
for i, (conditions, effect) in reversed(list(enumerate(effects))):
if all(literal_holds(state, literal) or (literal in axiom_from_atom) for literal in conditions):
action.precondition.extend(conditions)
effects[i] = ([], effect)
action.applied_effects.append(effect.negate() if negate else effect)
else:
effects.pop(i)
# RuntimeError: Preimage fact ('new-axiom@0',) is not achievable!
#precondition = action.precondition # TODO: strange bug if this applies
precondition = [literal for literal in action.precondition if literal.predicate in axioms_from_name]
axiom_plans.append([])
success = extract_axioms(state, axiom_from_atom, precondition, axiom_plans[-1])
if not success:
print(all_conditions)
print(action)
print(axioms)
raise RuntimeError('Could not extract axioms')
apply_action(state, action)
return new_action_instances, axiom_plans
| 10,854 |
Python
| 47.67713 | 117 | 0.65386 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/algorithms/scheduling/add_optimizers.py
|
from hsr_tamp.pddlstream.algorithms.downward import fd_from_fact, fact_from_fd
from hsr_tamp.pddlstream.algorithms.scheduling.negative import get_negative_result
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import extract_stream_plan
from hsr_tamp.pddlstream.algorithms.scheduling.utils import get_instance_facts
from hsr_tamp.pddlstream.language.optimizer import ComponentStream
from hsr_tamp.pddlstream.language.constants import get_args, get_prefix
from hsr_tamp.pddlstream.language.stream import Stream
def using_optimizers(results):
return any(isinstance(result.external, ComponentStream) for result in results)
def add_optimizer_effects(instantiated, node_from_atom):
# TODO: instantiate axioms with negative on effects for blocking
# TODO: fluent streams using conditional effects. Special fluent predicate for inputs to constraint
# TODO: bug! The FD instantiator prunes the result.external.stream_fact
for instance in instantiated.actions:
# TODO: need to handle case where a negative preconditions is used in an optimizer
for condition, effect in (instance.add_effects + instance.del_effects):
for literal in condition:
fact = fact_from_fd(literal)
if (fact in node_from_atom) and (node_from_atom[fact].result is not None):
raise NotImplementedError(literal)
facts = get_instance_facts(instance, node_from_atom)
stream_plan = []
extract_stream_plan(node_from_atom, facts, stream_plan)
# TODO: can detect if some of these are simultaneous and add them as preconditions
for result in stream_plan:
#if isinstance(result.external, ComponentStream):
if True: # TODO: integrate sampler and optimizer treatments
# TODO: need to make multiple versions if several ways of achieving the action
atom = fd_from_fact(result.stream_fact)
instantiated.atoms.add(atom)
effect = (tuple(), atom)
instance.add_effects.append(effect)
instance.effect_mappings.append(effect + (None, None))
# domain = {fact for result in stream_plan if result.external.info.simultaneous
# for fact in result.instance.get_domain()}
# TODO: can streams depending on these be used if dependent preconditions are added to the action
def recover_simultaneous(results, negative_streams, deferred_from_name, instances):
result_from_stream_fact = {}
for result in results:
if isinstance(result.external, Stream):
assert result.stream_fact not in result_from_stream_fact
result_from_stream_fact[result.stream_fact] = result
negative_from_stream_predicate = {}
for state_stream in negative_streams:
if not isinstance(state_stream, Stream):
continue
predicate = get_prefix(state_stream.stream_fact)
if predicate in negative_from_stream_predicate:
# TODO: could make a conjunction condition instead
raise NotImplementedError()
negative_from_stream_predicate[predicate] = state_stream
stream_plan = []
action_plan = []
for instance in instances:
if instance.name in deferred_from_name:
result = deferred_from_name[instance.name]
if result not in stream_plan:
stream_plan.append(result)
else:
action_plan.append(instance)
for conditions, effect in instance.add_effects:
# Assumes effects are in order
assert not conditions
fact = fact_from_fd(effect)
if fact in result_from_stream_fact:
result = result_from_stream_fact[fact]
elif effect.predicate in negative_from_stream_predicate:
negative = negative_from_stream_predicate[effect.predicate]
result = get_negative_result(negative, get_args(fact))
else:
continue
if result not in stream_plan:
stream_plan.append(result)
return stream_plan, action_plan
| 4,173 |
Python
| 50.530864 | 113 | 0.668584 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/synthesizer.py
|
from collections import deque, Counter
from hsr_tamp.pddlstream.algorithms.reorder import get_partial_orders
from hsr_tamp.pddlstream.language.constants import is_plan
from hsr_tamp.pddlstream.language.conversion import substitute_expression
from hsr_tamp.pddlstream.language.function import FunctionResult
from hsr_tamp.pddlstream.language.optimizer import get_cluster_values, OptimizerResult
from hsr_tamp.pddlstream.language.statistics import Performance
from hsr_tamp.pddlstream.language.stream import Stream, StreamInstance, StreamResult, StreamInfo
from hsr_tamp.pddlstream.utils import neighbors_from_orders
def decompose_result(result):
if isinstance(result, SynthStreamResult):
return result.decompose()
elif isinstance(result, OptimizerResult):
return result.external.stream_plan
return [result]
def decompose_stream_plan(stream_plan):
if not is_plan(stream_plan):
return stream_plan
new_stream_plan = []
for result in stream_plan:
new_stream_plan.extend(decompose_result(result))
return new_stream_plan
class SynthStreamResult(StreamResult):
def get_functions(self):
return substitute_expression(self.instance.external.functions, self.mapping)
def decompose(self):
results = []
for i, stream in enumerate(self.instance.external.streams):
macro_from_micro = self.instance.external.macro_from_micro[i]
input_objects = tuple(self.mapping[macro_from_micro[inp]] for inp in stream.inputs)
instance = stream.get_instance(input_objects)
output_objects = tuple(self.mapping[macro_from_micro[out]] for out in stream.outputs)
results.append(StreamResult(instance, output_objects))
return results
class SynthStreamInstance(StreamInstance):
pass
#def decompose(self):
# return self.streams
class SynthStream(Stream):
# TODO: wild stream optimizer
_Instance = SynthStreamInstance
_Result = SynthStreamResult
def __init__(self, synthesizer, inputs, domain, outputs, certified, functions,
streams, macro_from_micro):
def gen_fn(*input_values): # TODO: take in guess values for inputs?
assert (len(inputs) == len(input_values))
mapping = dict(zip(inputs, input_values))
targets = substitute_expression(certified | functions, mapping)
return synthesizer.gen_fn(outputs, targets) # TODO: could also return a map
#info = None # TODO: stream info
info = StreamInfo() # TODO: use StreamSynthesizer?
super(SynthStream, self).__init__(synthesizer.name, gen_fn, inputs, domain, outputs, certified, info)
self.synthesizer = synthesizer
self.streams = streams
self.functions = tuple(functions)
self.macro_from_micro = macro_from_micro
def update_statistics(self, overhead, success):
self.synthesizer.update_statistics(overhead, success)
def get_p_success(self):
return self.synthesizer.get_p_success()
def get_overhead(self):
return self.synthesizer.get_overhead()
#def decompose(self):
# return self.streams
##################################################
class StreamSynthesizer(Performance): # JointStream | Stream Combiner
def __init__(self, name, streams, gen_fn, post_only=False):
super(StreamSynthesizer, self).__init__(name, StreamInfo())
self.name = name
self.streams = {s.lower(): m for s, m in streams.items()}
self.gen_fn = gen_fn
self.macro_results = {}
self.post_only = post_only
#def get_instances(self):
# raise NotImplementedError()
def get_synth_stream(self, stream_plan):
key = frozenset(stream_plan)
if key in self.macro_results:
return self.macro_results[key]
streams = list(filter(lambda r: isinstance(r, StreamResult), stream_plan))
        if len(streams) < 1: # No point if empty (arguably also if only one)
return None
inputs, domain, outputs, certified, functions, macro_from_micro, \
input_objects, output_objects, fluent_facts = get_cluster_values(stream_plan)
if fluent_facts:
raise NotImplementedError()
mega_stream = SynthStream(self, inputs, domain,
outputs, certified, functions,
streams, macro_from_micro)
mega_instance = mega_stream.get_instance(input_objects)
self.macro_results[key] = SynthStreamResult(mega_instance, output_objects)
return self.macro_results[key]
def __repr__(self):
return '{}{}'.format(self.name, self.streams)
# TODO: worthwhile noting that the focused algorithm does not search over all plan skeletons directly...
##################################################
# TODO: factor this into algorithms
# TODO:
# 1) Iteratively resolve for the next stream plan to apply rather than do in sequence
# 2) Apply to a constraint network specification
# 3) Satisfy a constraint network where free variables aren't given by streams
# 4) Allow algorithms to return not feasible to combine rather than impose minimums
# 5) Make a method (rather than a spec) that traverses the constraint graph and prunes weak links/constraints that can't be planned
# 6) Post process all feasible skeletons at once
# 7) Planning and execution view of the algorithm
# 8) Algorithm that does the matching of streams to variables
# 9) Add implied facts (e.g. types) to the constraint network as preconditions
def expand_cluster(synthesizer, v, neighbors, processed):
cluster = {v}
queue = deque([v])
while queue:
v1 = queue.popleft()
for v2 in neighbors[v1]:
if (v2 not in processed) and (v2.instance.external.name in synthesizer.streams):
cluster.add(v2)
queue.append(v2)
processed.add(v2)
return cluster
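# Sketch (hypothetical results): starting at r0 with neighbors {r0: [r1, r2]},
# the cluster grows breadth-first to {r0, r1, r2}, but only across results
# whose stream names appear in the synthesizer's declared template.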
def get_synthetic_stream_plan(stream_plan, synthesizers):
# TODO: fix this implementation of this to be as follows:
# 1) Prune graph not related
# 2) Cluster
# 3) Try combinations of replacing on stream plan
if not is_plan(stream_plan) or (not synthesizers):
return stream_plan
orders = get_partial_orders(stream_plan)
for order in list(orders):
orders.add(order[::-1])
neighbors, _ = neighbors_from_orders(orders)
# TODO: what if many possibilities?
# TODO: cluster first and then plan using the macro and regular streams
processed = set()
new_stream_plan = []
for result in stream_plan: # Processing in order is important
if result in processed:
continue
processed.add(result)
# TODO: assert that it has at least one thing in it
for synthesizer in synthesizers:
# TODO: something could be an input and output of a cut...
if result.instance.external.name not in synthesizer.streams:
continue
# TODO: need to ensure all are covered I think?
# TODO: don't do if no streams within
cluster = expand_cluster(synthesizer, result, neighbors, processed)
counts = Counter(r.instance.external.name for r in cluster)
if not all(n <= counts[name] for name, n in synthesizer.streams.items()):
continue
ordered_cluster = [r for r in stream_plan if r in cluster]
synthesizer_result = synthesizer.get_synth_stream(ordered_cluster)
if synthesizer_result is None:
continue
new_stream_plan.append(synthesizer_result)
new_stream_plan.extend(filter(lambda s: isinstance(s, FunctionResult), ordered_cluster))
break
else:
new_stream_plan.append(result)
return new_stream_plan
##################################################
"""
def get_synthetic_stream_plan2(stream_plan, synthesizers):
# TODO: pass subgoals along the plan in directly
# TODO: could just do this on the objects themselves to start
free_parameters = set()
for result in stream_plan:
if isinstance(result, StreamResult):
free_parameters.update(result.output_objects)
print(free_parameters)
# TODO: greedy method first
new_plan = []
facts = set()
while True:
candidates = []
for result in stream_plan:
if result.instance.get_domain() <= facts:
candidates.append(result)
selection = candidates[-1]
new_plan.append(selection)
print(new_plan)
print(stream_plan)
print(synthesizers)
raise NotImplementedError()
"""
| 8,674 |
Python
| 41.73399 | 132 | 0.655983 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/skeleton.py
|
from __future__ import print_function
import time
from collections import namedtuple
try:
    from collections.abc import Sized # Python 3.3+ (removed from collections in 3.10)
except ImportError:
    from collections import Sized # Python 2 fallback
from heapq import heappush, heappop, heapreplace
from operator import itemgetter
from hsr_tamp.pddlstream.algorithms.common import is_instance_ready, EvaluationNode
from hsr_tamp.pddlstream.algorithms.disabled import process_instance, update_bindings, update_cost, bind_action_plan
from hsr_tamp.pddlstream.language.constants import is_plan, INFEASIBLE
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.utils import elapsed_time, HeapElement, safe_zip, get_mapping
# The motivation for immediately instantiating is to avoid unnecessary sampling
# Consider a stream result DAG A -> C, B -> C
# If A is already successfully sampled, don't want to resample A until B is sampled
def puncture(sequence, index):
return sequence[:index] + sequence[index+1:]
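# e.g. puncture((0, 1, 2, 3), 1) == (0, 2, 3); works for tuples and lists alike.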
##################################################
class Skeleton(object):
def __init__(self, queue, stream_plan, action_plan, cost):
self.index = len(queue.skeletons)
queue.skeletons.append(self)
self.queue = queue
self.stream_plan = stream_plan
self.action_plan = action_plan
self.cost = cost
self.root = Binding(self.queue, self,
stream_indices=range(len(stream_plan)),
stream_attempts=[0]*len(stream_plan),
bound_results={},
bindings={},
cost=cost)
self.best_binding = self.root
def bind_stream_plan(self, mapping, indices=None):
if indices is None:
indices = range(len(self.stream_plan))
return [self.stream_plan[index].remap_inputs(mapping) for index in indices]
#def __repr__(self):
# return repr(self.action_plan)
##################################################
# TODO: only branch on new bindings caused by new stream outputs
# TODO: delete a binding if a stream is exhausted
# TODO: evaluate all stream instances in queue below a target effort
Priority = namedtuple('Priority', ['attempted', 'effort'])
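# Namedtuples compare lexicographically, so e.g. Priority(False, (0, 3)) sorts
# before Priority(True, (1, 3)) in the min-heap: unattempted bindings come
# first, then those with fewer total attempts.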
class Binding(object):
# TODO: maintain a tree instead. Propagate the best subtree upwards
def __init__(self, queue, skeleton, stream_indices, stream_attempts,
bound_results, bindings, cost):
self.queue = queue
self.skeleton = skeleton
assert len(stream_indices) == len(stream_attempts)
self.stream_indices = list(stream_indices)
self.stream_attempts = list(stream_attempts)
self.bound_results = bound_results
self.bindings = bindings
self.cost = cost
self.children = []
self.enumerated = False # What if result enumerated with zero calls?
self._remaining_results = None
# Maybe just reset the indices for anything that isn't applicable
# n+1 sample represented
# TODO: store partial orders
# TODO: store applied results
# TODO: the problem is that I'm not actually doing all combinations because I'm passing attempted
@property
def attempts_from_index(self):
return get_mapping(self.stream_indices, self.stream_attempts)
@property
def remaining_results(self):
if self._remaining_results is None:
self._remaining_results = self.skeleton.bind_stream_plan(self.bindings, self.stream_indices)
return self._remaining_results
@property
def action_plan(self):
return bind_action_plan(self.skeleton.action_plan, self.bindings)
@property
def result(self):
return self.remaining_results[0]
@property
def index(self):
return self.stream_indices[0]
@property
def attempts(self):
return self.stream_attempts[0]
def is_bound(self):
return not self.stream_indices
def is_dominated(self):
# TODO: what should I do if the cost=inf (from incremental/exhaustive)
return self.queue.store.has_solution() and (self.queue.store.best_cost <= self.cost)
def is_enabled(self):
return not (self.enumerated or self.is_dominated())
def post_order(self):
for child in self.children:
for binding in child.post_order():
yield binding
yield self
def get_priority(self):
# Infinite cost if skeleton is exhausted
# Attempted is equivalent to whether any stream result is disabled
num_attempts = sum(self.stream_attempts)
attempted = num_attempts != 0
# TODO: lexicographic tiebreaking using plan cost and other skeleton properties
return Priority(attempted, (num_attempts, len(self.stream_attempts)))
def get_element(self):
return HeapElement(self.get_priority(), self)
def get_key(self):
# Each stream result is unique (affects hashing)
return self.skeleton, tuple(self.stream_indices), frozenset(self.bindings.items())
def _instantiate(self, index, new_result):
if not new_result.is_successful():
return None # TODO: check if satisfies target certified
opt_result = self.remaining_results[index]
#if not isinstance(new_result, StreamResult) or not new_result.output_objects:
# self.stream_indices = puncture(self.stream_indices, index)
# self.stream_attempts = puncture(self.stream_attempts, index)
# self.bound_results[self.stream_indices[index]] = new_result
# self.cost = update_cost(self.cost, opt_result, new_result)
# self._remaining_results = puncture(self._remaining_results, index)
# self.queue.disable_binding(self)
# self.queue.new_binding(self)
# return self
bound_results = self.bound_results.copy()
bound_results[self.stream_indices[index]] = new_result
binding = Binding(self.queue, self.skeleton,
puncture(self.stream_indices, index),
puncture(self.stream_attempts, index),
bound_results,
update_bindings(self.bindings, opt_result, new_result),
update_cost(self.cost, opt_result, new_result))
#if not isinstance(new_result, StreamResult) or not new_result.output_objects:
# binding._remaining_results = puncture(self._remaining_results, index)
if len(binding.stream_indices) < len(self.skeleton.best_binding.stream_indices):
self.skeleton.best_binding = binding
self.children.append(binding)
self.queue.new_binding(binding)
#if not isinstance(new_result, StreamResult) or not new_result.output_objects:
# # The binding is dominated
# self.enumerated = True
# self.queue.update_enabled(self)
return binding
def update_instances(self):
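        # Query each optimistic result's instance for results generated since
        # the recorded attempt count, branching a child binding per new result.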
updated = False
for index, (opt_result, attempt) in enumerate(safe_zip(self.remaining_results, self.stream_attempts)):
if self.enumerated:
return updated
if opt_result.instance.num_calls != attempt:
updated = True
for new_result in opt_result.instance.get_results(start=attempt):
self._instantiate(index, new_result)
self.stream_attempts[index] = opt_result.instance.num_calls
self.enumerated |= opt_result.instance.enumerated
return updated
def __repr__(self):
#return '{}({})'.format(self.__class__.__name__, str_from_object(self.remaining_stream_plan))
#return '{}({})'.format(self.__class__.__name__, str_from_object(self.action_plan))
return '{}(skeleton={}, remaining={})'.format(
self.__class__.__name__, self.skeleton.index, self.stream_indices) #str_from_object(self.attempts_from_index))
##################################################
class SkeletonQueue(Sized):
# TODO: handle this in a partially ordered way
# TODO: alternatively store just preimage and reachieve
# TODO: make an "action" for returning to the search (if it is the best decision)
# TODO: could just maintain a list of active instances and sample/propagate
# TODO: store bindings in a factored form that only combines when needed
# TODO: update bindings given outcomes of eager streams
# TODO: immediately evaluate eager streams in the queue
def __init__(self, store, domain, disable=True):
self.store = store
self.evaluations = store.evaluations
self.domain = domain
self.skeletons = []
self.queue = []
self.binding_from_key = {}
self.bindings_from_instance = {}
self.enabled_bindings = set()
self.disable = disable
####################
def _flush_stale(self):
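        # Heap entries cache their priority at push time; if the root entry is
        # stale (its binding's priority has changed), reinsert it at the
        # current priority before it can be popped.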
while self.queue:
queue_priority, binding = self.queue[0]
current_priority = binding.get_priority()
if queue_priority == current_priority:
return
heapreplace(self.queue, binding.get_element())
####################
#def _reenable_stream_plan(self, stream_plan):
# # TODO: only disable if not used elsewhere
# # TODO: could just hash instances
# # TODO: do I actually need to reenable? Yes it ensures that
# # TODO: check if the index is the only one being sampled
# # for result in stream_plan:
# # result.instance.disabled = False
# stream_plan[0].instance.enable(self.evaluations, self.domain)
# # TODO: move functions as far forward as possible to prune these plans
# # TODO: make function evaluations low success as soon as finite cost
    # Maybe the reason repeat skeletons are happening is that the currently
    # active thing is disabled but another one on the plan isn't.
    # Could scan the whole queue each time a solution is found.
def update_enabled(self, binding):
if not binding.is_enabled() and (binding in self.enabled_bindings):
self.disable_binding(binding)
def is_enabled(self, binding):
self.update_enabled(binding)
return binding in self.enabled_bindings
def enable_binding(self, binding):
assert binding not in self.enabled_bindings
self.enabled_bindings.add(binding)
for result in binding.remaining_results:
instance = result.instance
if instance not in self.bindings_from_instance:
self.bindings_from_instance[instance] = set()
self.bindings_from_instance[instance].add(binding)
def disable_binding(self, binding):
assert binding in self.enabled_bindings
self.enabled_bindings.remove(binding)
for result in binding.remaining_results:
instance = result.instance
if instance in self.bindings_from_instance:
if binding in self.bindings_from_instance[instance]:
self.bindings_from_instance[instance].remove(binding)
if not self.bindings_from_instance[instance]:
del self.bindings_from_instance[instance]
####################
def new_binding(self, binding):
key = binding.get_key()
if key in self.binding_from_key:
print('Binding already visited!') # Could happen if binding is the same
#return
self.binding_from_key[key] = binding
if not binding.is_enabled():
return
if not binding.stream_indices:
# if is_solution(self.domain, self.evaluations, bound_plan, self.goal_expression):
self.store.add_plan(binding.action_plan, binding.cost)
# TODO: could update active for all items in a queue fashion
return
binding.update_instances()
if binding.is_enabled():
self.enable_binding(binding)
heappush(self.queue, binding.get_element())
def new_skeleton(self, stream_plan, action_plan, cost):
skeleton = Skeleton(self, stream_plan, action_plan, cost)
self.new_binding(skeleton.root)
####################
def _generate_results(self, instance):
# assert(instance.opt_index == 0)
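        # Sample the stream instance once, then propagate the outcome to every
        # enabled binding that still depends on it.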
if not is_instance_ready(self.evaluations, instance):
raise RuntimeError(instance)
new_results, _ = process_instance(self.store, self.domain, instance, disable=self.disable)
is_new = bool(new_results)
for i, binding in enumerate(list(self.bindings_from_instance[instance])):
#print(i, binding)
# Maybe this list grows but not all the things are accounted for
if self.is_enabled(binding):
binding.update_instances()
self.update_enabled(binding)
#print()
return is_new
def _process_root(self):
is_new = False
self._flush_stale()
_, binding = heappop(self.queue)
if not self.is_enabled(binding):
return is_new
assert not binding.update_instances() #self.update_enabled(binding)
is_new = self._generate_results(binding.result.instance)
# _decompose_synthesizer_skeleton(queue, skeleton, stream_index)
if self.is_enabled(binding):
heappush(self.queue, binding.get_element())
return is_new
####################
def is_active(self):
return self.queue and (not self.store.is_terminated())
def greedily_process(self):
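        # Process roots only while the best-priority binding has never been attempted.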
while self.is_active():
self._flush_stale()
key, _ = self.queue[0]
if key.attempted:
break
self._process_root()
def process_until_new(self):
# TODO: process the entire queue once instead
is_new = False
while self.is_active() and (not is_new):
is_new |= self._process_root()
self.greedily_process()
return is_new
def timed_process(self, max_time):
start_time = time.time()
while self.is_active() and (elapsed_time(start_time) <= max_time):
self._process_root()
self.greedily_process()
# TODO: print cost updates when progress with a new skeleton
def accelerate_best_bindings(self):
# TODO: reset the values for old streams
for skeleton in self.skeletons:
for _, result in sorted(skeleton.best_binding.bound_results.items(), key=itemgetter(0)):
# TODO: just accelerate the facts within the plan preimage
result.call_index = 0 # Pretends the fact was first
new_complexity = result.compute_complexity(self.evaluations)
for fact in result.get_certified():
evaluation = evaluation_from_fact(fact)
if new_complexity < self.evaluations[evaluation].complexity:
self.evaluations[evaluation] = EvaluationNode(new_complexity, result)
def process(self, stream_plan, action_plan, cost, complexity_limit, max_time=0):
# TODO: manually add stream_plans for synthesizers/optimizers
start_time = time.time()
if is_plan(stream_plan):
#print([result for result in stream_plan if result.optimistic])
#raw_input('New skeleton')
self.new_skeleton(stream_plan, action_plan, cost)
self.greedily_process()
elif stream_plan is INFEASIBLE:
# TODO: use complexity_limit
self.process_until_new()
self.timed_process(max_time - elapsed_time(start_time))
self.accelerate_best_bindings()
#print(len(self.queue), len(self.skeletons),
# len(self.bindings_from_instance), len(self.binding_from_key))
# Only currently blocking streams with after called
# Can always process streams with a certain complexity
# Temporarily pop off the queue and then re-add
# Domination occurs when no downstream skeleton that
# Is it worth even doing the dynamic instantiation?
# If some set fails where the output is an input
# Scale input
def __len__(self):
return len(self.queue)
##################################################
# from hsr_tamp.pddlstream.language.synthesizer import SynthStreamResult
# def _decompose_synthesizer_skeleton(queue, skeleton, index):
# stream_plan, plan_attempts, bindings, plan_index, cost = skeleton
# opt_result = stream_plan[index]
# if (plan_attempts[index] == 0) and isinstance(opt_result, SynthStreamResult):
# # TODO: only decompose if failure?
# decomposition = opt_result.decompose()
# new_stream_plan = stream_plan[:index] + decomposition + stream_plan[index+1:]
# new_plan_attempts = plan_attempts[:index] + [0]*len(decomposition) + plan_attempts[index+1:]
# queue.new_binding(new_stream_plan, new_plan_attempts, bindings, plan_index, cost)
##################################################
# TODO: want to minimize number of new sequences as they induce overhead
# TODO: estimate how many times a stream needs to be queried (acceleration)
#
# def compute_sampling_cost(stream_plan, stats_fn=get_stream_stats):
# # TODO: we are in a POMDP. If not the case, then the geometric cost policy is optimal
# if stream_plan is None:
# return INF
# expected_cost = 0
# for result in reversed(stream_plan):
# p_success, overhead = stats_fn(result)
# expected_cost += geometric_cost(overhead, p_success)
# return expected_cost
# # TODO: mix between geometric likelihood and learned distribution
# # Sum tail distribution for number of future
# # Distribution on the number of future attempts until successful
# # Average the tail probability mass
#
# def compute_belief(attempts, p_obs):
# return pow(p_obs, attempts)
#
# def compute_success_score(plan_attempts, p_obs=.9):
# beliefs = [compute_belief(attempts, p_obs) for attempts in plan_attempts]
# prior = 1.
# for belief in beliefs:
# prior *= belief
# return -prior
#
# def compute_geometric_score(plan_attempts, overhead=1, p_obs=.9):
# # TODO: model the decrease in belief upon each failure
# # TODO: what if stream terminates? Assign high cost
# expected_cost = 0
# for attempts in plan_attempts:
# p_success = compute_belief(attempts, p_obs)
# expected_cost += geometric_cost(overhead, p_success)
# return expected_cost
##################################################
# from hsr_tamp.pddlstream.algorithms.downward import task_from_domain_problem, get_problem, get_action_instances, \
# get_goal_instance, plan_preimage, is_valid_plan, substitute_derived, is_applicable, apply_action
# from hsr_tamp.pddlstream.algorithms.reorder import replace_derived
# from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axiom_plan
# def is_solution(domain, evaluations, action_plan, goal_expression):
# task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs=True))
# action_instances = get_action_instances(task, action_plan) + [get_goal_instance(task.goal)]
# #original_init = task.init
# task.init = set(task.init)
# for instance in action_instances:
# axiom_plan = extract_axiom_plan(task, instance, negative_from_name={}, static_state=task.init)
# if axiom_plan is None:
# return False
# #substitute_derived(axiom_plan, instance)
# #if not is_applicable(task.init, instance):
# # return False
# apply_action(task.init, instance)
# return True
# #replace_derived(task, set(), plan_instances)
# #preimage = plan_preimage(plan_instances, [])
# #return is_valid_plan(original_init, action_instances) #, task.goal)
| 19,908 |
Python
| 44.145125 | 122 | 0.631605 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/satisfaction.py
|
from __future__ import print_function
from collections import namedtuple
from hsr_tamp.pddlstream.algorithms.meta import solve
from hsr_tamp.pddlstream.algorithms.satisfaction import SatisfactionSolution
from hsr_tamp.pddlstream.algorithms.constraints import to_constant, ORDER_PREDICATE, ASSIGNED_PREDICATE, \
get_internal_prefix
from hsr_tamp.pddlstream.algorithms.downward import make_action, make_domain, make_predicate
from hsr_tamp.pddlstream.language.constants import is_parameter, Not, PDDLProblem, MINIMIZE, NOT, partition_facts, get_costs, \
get_constraints
from hsr_tamp.pddlstream.language.conversion import get_prefix, get_args, obj_from_value_expression
from hsr_tamp.pddlstream.utils import safe_zip
Cluster = namedtuple('Cluster', ['constraints', 'parameters'])
def get_parameters(expression):
head = get_prefix(expression)
if head in [NOT, MINIMIZE]:
return get_parameters(get_args(expression)[0])
return list(filter(is_parameter, get_args(expression)))
def update_cluster(cluster1, cluster2):
assert cluster2.parameters <= cluster1.parameters
cluster1.constraints.extend(cluster2.constraints)
cluster1.parameters.update(cluster2.parameters)
def cluster_constraints(terms):
# Can always combine clusters but leads to inefficient grounding
    # The extreme case of this is combining everything into a single goal
# Alternatively, can just keep each cluster separate (shouldn't slow down search much)
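    # Greedy strategy: merge each cost cluster into the smallest constraint
    # cluster whose parameters cover it (keeping at most one cost term per
    # cluster), then fold any cluster subsumed by a larger one into it.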
clusters = sorted([Cluster([constraint], set(get_parameters(constraint)))
for constraint in get_constraints(terms)],
key=lambda c: len(c.parameters), reverse=True)
cost_clusters = sorted([Cluster([cost], set(get_parameters(cost)))
for cost in get_costs(terms)],
key=lambda c: len(c.parameters))
for c1 in cost_clusters:
for c2 in reversed(clusters):
if 1 < len(get_costs(c1.constraints)) + len(get_costs(c2.constraints)):
continue
if c1.parameters <= c2.parameters:
update_cluster(c2, c1)
break
else:
# TODO: extend this to allow the intersection to cover the cluster
raise RuntimeError('Unable to find a cluster for cost term:', c1.constraints[0])
for i in reversed(range(len(clusters))):
c1 = clusters[i]
for j in reversed(range(i)):
c2 = clusters[j]
if 1 < len(get_costs(c1.constraints)) + len(get_costs(c2.constraints)):
continue
if c1.parameters <= c2.parameters:
update_cluster(c2, c1)
clusters.pop(i)
break
return clusters
##################################################
def planning_from_satisfaction(init, constraints):
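    # Compile the clustered constraints into a sequential planning problem:
    # cluster i becomes action 'cluster-i', gated by order fact i and yielding
    # order fact i+1, while ASSIGNED facts thread parameter bindings between
    # clusters.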
clusters = cluster_constraints(constraints)
prefix = get_internal_prefix(internal=False)
assigned_predicate = ASSIGNED_PREDICATE.format(prefix)
order_predicate = ORDER_PREDICATE.format(prefix)
#order_value_facts = make_order_facts(order_predicate, 0, len(clusters)+1)
order_value_facts = [(order_predicate, '_t{}'.format(i)) for i in range(len(clusters)+1)]
init.append(order_value_facts[0])
goal_expression = order_value_facts[-1]
order_facts = list(map(obj_from_value_expression, order_value_facts))
bound_parameters = set()
actions = []
#constants = {}
for i, cluster in enumerate(clusters):
objectives = list(map(obj_from_value_expression, cluster.constraints))
constraints, negated, costs = partition_facts(objectives)
if negated:
raise NotImplementedError(negated)
#free_parameters = cluster.parameters - bound_parameters
existing_parameters = cluster.parameters & bound_parameters
# TODO: confirm that negated predicates work as intended
name = 'cluster-{}'.format(i)
parameters = list(sorted(cluster.parameters))
preconditions = [(assigned_predicate, to_constant(p), p) for p in sorted(existing_parameters)] + \
constraints + [order_facts[i]]
effects = [(assigned_predicate, to_constant(p), p) for p in parameters] + \
[order_facts[i+1], Not(order_facts[i])]
if costs:
assert len(costs) == 1
[cost] = costs
else:
cost = None
actions.append(make_action(name, parameters, preconditions, effects, cost))
#actions[-1].dump()
bound_parameters.update(cluster.parameters)
predicates = [make_predicate(order_predicate, ['?step'])] # '?num',
domain = make_domain(predicates=predicates, actions=actions)
return domain, goal_expression
##################################################
def pddl_from_csp(stream_pddl, stream_map, init, constraints):
domain, goal = planning_from_satisfaction(init, constraints)
constant_map = {}
return PDDLProblem(domain, constant_map, stream_pddl, stream_map, init, goal)
def bindings_from_plan(problem, plan):
if plan is None:
return None
domain = problem[0]
bindings = {}
for action, (name, args) in safe_zip(domain.actions, plan):
assert action.name == name
for param, arg in safe_zip(action.parameters, args):
name = param.name
assert bindings.get(name, arg) is arg
bindings[name] = arg
return bindings
##################################################
def solve_pddlstream_satisfaction(problem, **kwargs):
# TODO: prune set of streams based on constraints
# TODO: investigate constraint satisfaction techniques for search instead
# TODO: optimistic objects based on free parameters that prevent cycles
# TODO: disallow creation of new parameters / certifying new facts
stream_pddl, stream_map, init, constraints = problem
problem = pddl_from_csp(stream_pddl, stream_map, init, constraints)
plan, cost, facts = solve(problem, **kwargs)
bindings = bindings_from_plan(problem, plan)
return SatisfactionSolution(bindings, cost, facts)
| 6,114 |
Python
| 42.678571 | 127 | 0.651783 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/successor_generator.py
|
from collections import defaultdict, deque
from hsr_tamp.pddlstream.algorithms.downward import literal_holds, get_derived_predicates, apply_action
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_goal_instance
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axioms
class SuccessorNode(object):
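    # A node in a decision tree over an ordered list of fluent atoms: depth d
    # tests atom_order[d], and children are keyed by the required truth value
    # (True/False) or None for "don't care".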
def __init__(self, depth=0):
self.depth = depth
self.children = {}
self.instances = []
def get_child(self, value):
if value not in self.children:
self.children[value] = SuccessorNode(depth=self.depth + 1)
return self.children[value]
def get_successors(self, atom_order, state):
if len(atom_order) <= self.depth:
return self.instances
atom = atom_order[self.depth]
instances = []
for value, node in self.children.items():
if (value is None) or (literal_holds(state, atom) is value):
instances.extend(node.get_successors(atom_order, state))
return instances
def get_fluents(init, action_instances):
fluents = set()
for action in action_instances: # TODO: just actions if no action_instances
for cond, eff in action.add_effects:
assert not cond
if not literal_holds(init, eff):
fluents.add(eff)
for cond, eff in action.del_effects:
assert not cond
if not literal_holds(init, eff.negate()):
fluents.add(eff)
return fluents
class SuccessorGenerator(object):
def __init__(self, instantiated, action_instances=[]):
derived_predicates = get_derived_predicates(instantiated.task.axioms)
conditions = {literal.positive() for axiom in instantiated.axioms for literal in axiom.condition}
state = set(instantiated.task.init)
fluents = get_fluents(state, action_instances) & conditions
self.fluent_order = list(fluents)
applicable_axioms = []
axiom_from_literal = defaultdict(list)
# TODO: could also just use get_achieving_axioms
self.root = SuccessorNode()
for axiom in instantiated.axioms:
if all((l.predicate in derived_predicates) or (l.positive() in fluents) or
literal_holds(state, l) for l in axiom.condition):
applicable_axioms.append(axiom)
for literal in axiom.condition:
if literal in fluents:
axiom_from_literal[literal].append(axiom)
fluent_conds = {l.positive(): not l.negated for l in axiom.condition}
node = self.root
for atom in self.fluent_order:
value = fluent_conds.get(atom, None)
node = node.get_child(value)
node.instances.append(axiom)
def get_successors(self, state):
return self.root.get_successors(self.fluent_order, state)
##################################################
def mark_axiom(queue, remaining_from_axiom, axiom, axiom_from_atom):
if not remaining_from_axiom[id(axiom)]:
axiom_from_atom[axiom.effect].append(axiom)
queue.append(axiom.effect)
def mark_iteration(state, axioms_from_literal, fluents_from_axiom, remaining_from_axiom, static_axioms):
axioms_from_atom = defaultdict(list)
for literal in axioms_from_literal:
if literal_holds(state, literal):
axioms_from_atom[literal].append(None)
queue = deque(axioms_from_atom.keys())
for axiom in static_axioms:
mark_axiom(queue, remaining_from_axiom, axiom, axioms_from_atom)
while queue:
literal = queue.popleft()
for axiom in axioms_from_literal[literal]:
remaining_from_axiom[id(axiom)] -= 1
mark_axiom(queue, remaining_from_axiom, axiom, axioms_from_atom)
for literal, axioms in axioms_from_atom.items():
for axiom in axioms:
if axiom is not None:
remaining_from_axiom[id(axiom)] = fluents_from_axiom[id(axiom)]
# TODO: still some overhead here
# TODO: could process these layer by layer instead
return {atom: axioms[0] for atom, axioms in axioms_from_atom.items()}
def recover_axioms_plans2(instantiated, action_instances):
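    # Replay the action plan from the initial state, extracting one axiom plan
    # per action (plus the goal instance) that achieves its unsatisfied
    # preconditions before the action is applied.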
#import axiom_rules
#with Verbose(False):
# normalized_axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(
# [], instantiated.axioms, instantiated.goal_list)
#state = set(instantiated.task.init + axiom_init)
normalized_axioms = instantiated.axioms # TODO: ignoring negated because cannot reinstantiate correctly
state = set(instantiated.task.init)
fluents = get_fluents(state, action_instances)
unprocessed_from_atom = defaultdict(list)
fluents_from_axiom = {}
remaining_from_axiom = {}
for axiom in normalized_axioms:
fluent_conditions = []
for literal in axiom.condition:
if literal.positive() in fluents:
fluent_conditions.append(literal)
elif not literal_holds(state, literal):
fluent_conditions = None
break
if fluent_conditions is None:
continue
for literal in fluent_conditions:
unprocessed_from_atom[literal].append(axiom)
fluents_from_axiom[id(axiom)] = len(fluent_conditions)
remaining_from_axiom[id(axiom)] = fluents_from_axiom[id(axiom)]
static_axioms = [axiom for axiom, num in fluents_from_axiom.items() if num == 0]
axiom_plans = []
for action in action_instances + [get_goal_instance(instantiated.task.goal)]:
axiom_from_atom = mark_iteration(state, unprocessed_from_atom,
fluents_from_axiom, remaining_from_axiom, static_axioms)
preimage = []
for literal in action.precondition:
if not literal_holds(state, literal):
preimage.append(literal)
assert literal in axiom_from_atom
for cond, eff in (action.add_effects + action.del_effects):
# TODO: add conditional effects that must hold here
assert not cond
axiom_plans.append([])
assert extract_axioms(axiom_from_atom, preimage, axiom_plans[-1])
apply_action(state, action)
return axiom_plans
| 6,339 |
Python
| 43.335664 | 107 | 0.631961 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/execution.py
|
from collections import defaultdict
from hsr_tamp.pddlstream.utils import INF
class ActionInfo(object):
def __init__(self, terminal=False, p_success=None, overhead=None):
"""
:param terminal: Indicates the action may require replanning after use
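        :param p_success: optional prior probability that the action succeeds
        :param overhead: optional expected overhead of attempting the action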
"""
self.terminal = terminal # TODO: infer from p_success?
if self.terminal:
self.p_success, self.overhead = 1e-3, 0
else:
self.p_success, self.overhead = 1, INF
if p_success is not None:
self.p_success = p_success
if overhead is not None:
self.overhead = overhead
# TODO: should overhead just be cost here then?
def get_action_info(action_info):
action_execution = defaultdict(ActionInfo)
for name, info in action_info.items():
action_execution[name] = info
return action_execution
| 877 |
Python
| 31.518517 | 78 | 0.63626 |
makolon/hsr_isaac_tamp/hsr_tamp/pddlstream/retired/reorder_actions.py
|
from hsr_tamp.pddlstream.algorithms.downward import fd_from_fact, substitute_derived, is_applicable, apply_action, \
fd_from_evaluation, task_from_domain_problem, get_problem, get_action_instances
from hsr_tamp.pddlstream.algorithms.reorder import separate_plan, get_stream_stats, dynamic_programming
from hsr_tamp.pddlstream.algorithms.scheduling.recover_axioms import extract_axioms
from hsr_tamp.pddlstream.algorithms.instantiate_task import get_achieving_axioms
from hsr_tamp.pddlstream.algorithms.scheduling.recover_streams import evaluations_from_stream_plan
from hsr_tamp.pddlstream.language.constants import get_prefix, EQ, is_plan, And
from hsr_tamp.pddlstream.language.conversion import evaluation_from_fact
from hsr_tamp.pddlstream.language.external import Result
from hsr_tamp.pddlstream.language.function import PredicateResult
from hsr_tamp.pddlstream.utils import Verbose, MockSet, neighbors_from_orders
# Extract streams required to do one action
# Compute streams that strongly depend on these. Evaluate these.
# Execute the full prefix of the plan
# Make the first action cheaper if it uses something that doesn't need to re-expand
# How to do this with shared objects?
# Just do the same thing but make the cost 1 if a shared object is used
def get_stream_instances(stream_plan):
import pddl
# TODO: something that inverts the negative items
stream_instances = [] # TODO: could even apply these to the state directly
for result in stream_plan:
name = result.instance.external.name
precondition = list(map(fd_from_fact, result.instance.get_domain()))
effects = [([], fd_from_fact(fact)) for fact in result.get_certified() if get_prefix(fact) != EQ]
cost = None # TODO: effort?
instance = pddl.PropositionalAction(name, precondition, effects, cost)
stream_instances.append(instance)
return stream_instances
def instantiate_axioms(model, static_facts, fluent_facts, axiom_remap={}):
import pddl
instantiated_axioms = []
for atom in model:
if isinstance(atom.predicate, pddl.Axiom):
axiom = axiom_remap.get(atom.predicate, atom.predicate)
variable_mapping = dict([(par.name, arg)
for par, arg in zip(axiom.parameters, atom.args)])
inst_axiom = axiom.instantiate(variable_mapping, static_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
return instantiated_axioms
def replace_derived(task, negative_init, action_instances):
import pddl_to_prolog
import build_model
import axiom_rules
import pddl
original_actions = task.actions
original_init = task.init
task.actions = []
function_assignments = {f.fluent: f.expression for f in task.init
if isinstance(f, pddl.f_expression.FunctionAssignment)}
task.init = (set(task.init) | {a.negate() for a in negative_init}) - set(function_assignments)
for instance in action_instances:
#axiom_plan = extract_axiom_plan(task, instance, negative_from_name={}) # TODO: refactor this
# TODO: just instantiate task?
with Verbose(False):
model = build_model.compute_model(pddl_to_prolog.translate(task)) # Changes based on init
# fluent_facts = instantiate.get_fluent_facts(task, model)
fluent_facts = MockSet()
instantiated_axioms = instantiate_axioms(model, task.init, fluent_facts)
goal_list = [] # TODO: include the goal?
with Verbose(False): # TODO: helpful_axioms prunes axioms that are already true (e.g. not Unsafe)
helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms([instance], instantiated_axioms, goal_list)
axiom_from_atom, _ = get_achieving_axioms(task.init | negative_init | set(axiom_init), helpful_axioms)
# negated_from_name=negated_from_name)
axiom_plan = []
extract_axioms(axiom_from_atom, instance.precondition, axiom_plan)
substitute_derived(axiom_plan, instance)
assert(is_applicable(task.init, instance))
apply_action(task.init, instance)
task.actions = original_actions
task.init = original_init
def get_combined_orders(evaluations, stream_plan, action_plan, domain):
if not is_plan(action_plan):
return action_plan
# TODO: could just do this within relaxed
# TODO: do I want to strip the fluents and just do the partial ordering?
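    # Build a partial order over the combined plan: actions keep their total
    # order, and a stream result precedes a later stream/action whenever its
    # add effects intersect that operator's preconditions.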
stream_instances = get_stream_instances(stream_plan)
negative_results = filter(lambda r: isinstance(r, PredicateResult) and (r.value == False), stream_plan)
negative_init = set(fd_from_evaluation(evaluation_from_fact(f))
for r in negative_results for f in r.get_certified())
#negated_from_name = {r.instance.external.name for r in negative_results}
opt_evaluations = evaluations_from_stream_plan(evaluations, stream_plan)
goal_expression = And()
task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs=True))
action_instances = get_action_instances(task, action_plan)
replace_derived(task, negative_init, action_instances)
#combined_instances = stream_instances + action_instances
orders = set()
for i, a1 in enumerate(action_plan):
for a2 in action_plan[i+1:]:
orders.add((a1, a2))
# TODO: just store first achiever here
for i, instance1 in enumerate(stream_instances):
for j in range(i+1, len(stream_instances)):
effects = {e for _, e in instance1.add_effects}
if effects & set(stream_instances[j].precondition):
orders.add((stream_plan[i], stream_plan[j]))
for i, instance1 in enumerate(stream_instances):
for j, instance2 in enumerate(action_instances):
effects = {e for _, e in instance1.add_effects} | \
{e.negate() for _, e in instance1.del_effects}
if effects & set(instance2.precondition):
orders.add((stream_plan[i], action_plan[j]))
return orders
##################################################
def reorder_combined_plan(evaluations, combined_plan, action_info, domain, **kwargs):
# TODO: actions as a weak constraint
# TODO: actions are extremely unlikely to work
# TODO: can give actions extreme priority
if not is_plan(combined_plan):
return combined_plan
stream_plan, action_plan = separate_plan(combined_plan)
orders = get_combined_orders(evaluations, stream_plan, action_plan, domain)
_, out_orders = neighbors_from_orders(orders)
valid_combine = lambda v, subset: out_orders[v] <= subset
def stats_fn(operator):
if isinstance(operator, Result):
return get_stream_stats(operator)
name, _ = operator
info = action_info[name]
return info.p_success, info.overhead
return dynamic_programming(combined_plan, valid_combine, stats_fn, **kwargs)
##################################################
# def partial_ordered(plan):
# # https://www.aaai.org/ocs/index.php/ICAPS/ICAPS10/paper/viewFile/1420/1539
# # http://repository.cmu.edu/cgi/viewcontent.cgi?article=1349&context=compsci
# # https://arxiv.org/pdf/1105.5441.pdf
# # https://pdfs.semanticscholar.org/e057/e330249f447c2f065cf50db9dfaddad16aaa.pdf
# # https://github.mit.edu/caelan/PAL/blob/master/src/search/post_processing.cc
#
# instances = instantiate_plan(plan)
# orders = set()
# primary_effects = set() # TODO: start and goal operators here?
# for i in reversed(xrange(len(instances))):
# for pre in instances[i].preconditions:
# for j in reversed(xrange(i)):
# #if pre in instances[j].effects:
# if any(eff == pre for eff in instances[j].effects):
# orders.add((j, i))
# primary_effects.add((j, pre))
# break
# for eff in instances[i].effects:
# for j in xrange(i):
# if any((pre.head == eff.head) and (pre.value != eff.value) for pre in instances[j].preconditions):
# orders.add((j, i))
# if (i, eff) in primary_effects:
# for j in xrange(i):
# if any((eff2.head == eff.head) and (eff2.value != eff.value) for eff2 in instances[j].effects):
# orders.add((j, i))
# # TODO: could remove transitive
# # TODO: this isn't so helpful because it will choose arbitrary streams until an action is feasible (i.e. not intelligent ones)
# for i, (action, args) in enumerate(plan):
# print i, action, args #, instances[i].preconditions, instances[i].effects
# print orders
# print primary_effects
# print topological_sort(range(len(plan)), orders, lambda v: hasattr(plan[v][0], 'stream'))
| 8,909 |
Python
| 50.50289 | 132 | 0.662252 |
tanaydimri/omni.demo.ui/demo/ui/__init__.py
|
from .scripts.main_ui import *
| 30 |
Python
| 29.99997 | 30 | 0.766667 |
tanaydimri/omni.demo.ui/demo/ui/scripts/main_ui.py
|
import omni.ext
import omni.ui as ui
from demo.core import LetsPrint
from pathlib import Path
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class DemoUi(omni.ext.IExt):
def __init__(self):
self._demoWindow = None
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.epgCustomUi] The epgTestUi on startup callback..")
self.all_projects = None
self.__currentPath = Path(__file__).parent
self.__buttonIconPath = self.__currentPath.parent.parent.parent
self._default_project_image = "{0}/data/extension_preview_image.png".format(self.__buttonIconPath)
# Instantiating all the objects we might need.
# Here we are just instantiating a class to help print when the button is pressed
self._letsPrint = LetsPrint()
self._window = self._create_window()
def on_shutdown(self):
print("[omni.epgCustomUi] The epgTestUi on shutdown callback..")
self.destroy()
def destroy(self):
self._visiblity_changed_listener = None
self._demoWindow = None
def set_visible(self, value):
self._demoWindow.visible = value
def onButtonClick(self):
self._letsPrint.delegatedPrint()
def _create_window(self):
# IMPORTANT: Remember to pass in the "flags=ui.WINDOW_FLAGS_MENU_BAR" kwarg if you want to display Menu Items
        if self._demoWindow is None:
            self._demoWindow = ui.Window("Variant Manager", width=800, height=500, padding_x=10, padding_y=10, flags=ui.WINDOW_FLAGS_MENU_BAR)
        self.set_visible(True)
# This is how you add Menus to your UI.
with self._demoWindow.menu_bar:
with ui.Menu("File"):
ui.MenuItem("New")
ui.MenuItem("Open")
with ui.Menu("Open Recent"):
ui.MenuItem("myAwesomeScene.usd")
ui.MenuItem("anotherAwesomeScene.usd")
ui.MenuItem("yetAnotherAwesomeScene.usd")
ui.MenuItem("Save")
with self._demoWindow.frame:
demoLabel = ui.Label("EPIGRAPH PROJECTS", height=30, style={"font_size": 50, "color": 0xFF000000})
with ui.HStack():
demoButton = ui.Button("I will print Something", image_url=self._default_project_image, clicked_fn=self.onButtonClick)
| 2,764 |
Python
| 41.538461 | 142 | 0.641823 |
tanaydimri/omni.demo.ui/demo/core/__init__.py
|
from .scripts.letsPrint import *
| 32 |
Python
| 31.999968 | 32 | 0.8125 |
tanaydimri/omni.demo.ui/demo/core/scripts/letsPrint.py
|
import omni.ext
# Classes inheriting from "omni.ext.IExt" would be auto instantiated at runtime
# and the on_startup would be called. Besides, on_shutdown will be called while
# disabling this extension from the Extensions menu
class LetsPrint(omni.ext.IExt):
def __init__(self):
self.printer = Printer()
def on_startup(self):
print("Starting Up [epg.browser]")
def on_shutdown(self):
print("Shuting Down [epg.browser]")
def delegatedPrint(self):
self.printer.printSomething()
class Printer():
def __init__(self):
print("Printer Initialized")
def printSomething(self):
print("PRINTING SOMETHING NOW!!!")
| 630 |
Python
| 23.26923 | 80 | 0.726984 |
tanaydimri/omni.demo.ui/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.0.0"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Tanay Dimri"]
# The title and description fields are primarily for displaying extension info in UI
title = "DEMO UI"
description="A demo UI to understand how an omni extension UI should be structured."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# Location of change log file in target (final) folder of extension, relative to the root. Can also be just a content
# of it instead of file path. More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Demo"
# Keywords for the extension
keywords = ["demo", "ui"]
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/extension_preview_image.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/extension_preview_image.png"
# We only depend on omni.ui currently:
[dependencies]
"omni.ui" = {}
[[python.module]]
name = "demo.core"
# Main python module this extension provides, it will be publicly available as "import omni.example.hello".
[[python.module]]
name = "demo.ui"
# Additional python module with tests, to make them discoverable by test system.
#[[python.module]]
#name = "omni.example.hello.tests"
| 1,692 |
TOML
| 33.55102 | 118 | 0.747045 |
tanaydimri/omni.demo.ui/docs/CHANGELOG.md
|
Write all the Changelogs for this extension here:
----
v0.0.0 (Demo Release)
- Added the Button to UI which prints something.
- I hope this helps you to understand how Omniverse UI extensions could be built :)
| 210 |
Markdown
| 34.166661 | 83 | 0.757143 |
tanaydimri/omni.demo.ui/docs/README.md
|
**[NOT OFFICIALLY FROM NVIDIA]**
This is a demo extension with UI to understand how Omniverse Extension are structured.
To run this demo extension, put this in one of your Omniverse extension search paths. I prefer to put it under: "..\Documents\Kit\shared\exts". Then search for "demo" in your extensions tab, in any of the Omniverse applications.
**Some important points to notice:**
- The core functionality of the extension lives in demo/core. Here an init file is responsible for "collecting" all the core modules to be imported by other modules.
- Likewise, the UI related code lives under demo/ui
- All the classes inheriting from "omni.ext.IExt" will be instantiated when the extension is enabled, and their on_startup method will be called automatically. Likewise, on_shutdown is called when the extension is disabled (see the minimal sketch below).
- Be sure to read about Omni UI styling. There is great documentation in your Omni Kit app, under Omni::UI Doc.
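For reference, here is a minimal sketch of that startup/shutdown lifecycle (the module name and log tags are made up for illustration):
```python
import omni.ext

class MinimalExt(omni.ext.IExt):
    def on_startup(self, ext_id):
        # Called automatically when the extension is enabled.
        print(f"[demo.minimal] startup: {ext_id}")

    def on_shutdown(self):
        # Called automatically when the extension is disabled.
        print("[demo.minimal] shutdown")
```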
---
Hope this helps you with building your own extensions :)
- Stay Safe!!
| 1,022 |
Markdown
| 62.937496 | 228 | 0.774951 |
DataJuggler/DataJuggler.CameraKeys/README.md
|
Camera Keys is an Omniverse Python extension to move the camera around, and set all 6 camera keyframes at the current frame (Transform x,y,z and Rotation x,y,z).
<img src=https://github.com/DataJuggler/SharedRepo/blob/master/Shared/Images/CameraKeysExtension.png width=460 height=320>
To use this extension, you must have a camera named Camera at '/World/Camera'. Future versions may work with multiple cameras.
The extension is designed to help you easily create camera fly-throughs. Move your camera into position either manually or using the buttons shown above, and the transform keys are set at the current frame.
Thanks to @mati-codes for writing the camera move-forward code and helping me with many questions.
Change the frame in the timeline to the desired frame, then move the camera into place and click 'Set Keys' to set the keyframes.
Use the rotation and movement amount sliders to set how far the camera moves on each button click.
| 982 |
Markdown
| 41.739129 | 208 | 0.789206 |
DataJuggler/DataJuggler.CameraKeys/tools/scripts/link_app.py
|
import argparse
import json
import os
import sys
import packmanapi
import urllib3
def find_omniverse_apps():
http = urllib3.PoolManager()
try:
r = http.request("GET", "http://127.0.0.1:33480/components")
except Exception as e:
print(f"Failed retrieving apps from an Omniverse Launcher, maybe it is not installed?\nError: {e}")
sys.exit(1)
apps = {}
for x in json.loads(r.data.decode("utf-8")):
latest = x.get("installedVersions", {}).get("latest", "")
if latest:
for s in x.get("settings", []):
if s.get("version", "") == latest:
root = s.get("launch", {}).get("root", "")
apps[x["slug"]] = (x["name"], root)
break
return apps
def create_link(src, dst):
print(f"Creating a link '{src}' -> '{dst}'")
packmanapi.link(src, dst)
APP_PRIORITIES = ["code", "create", "view"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create folder link to Kit App installed from Omniverse Launcher")
parser.add_argument(
"--path",
help="Path to Kit App installed from Omniverse Launcher, e.g.: 'C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4'",
required=False,
)
parser.add_argument(
"--app", help="Name of Kit App installed from Omniverse Launcher, e.g.: 'code', 'create'", required=False
)
args = parser.parse_args()
path = args.path
if not path:
print("Path is not specified, looking for Omniverse Apps...")
apps = find_omniverse_apps()
if len(apps) == 0:
print(
"Can't find any Omniverse Apps. Use Omniverse Launcher to install one. 'Code' is the recommended app for developers."
)
sys.exit(0)
print("\nFound following Omniverse Apps:")
for i, slug in enumerate(apps):
name, root = apps[slug]
print(f"{i}: {name} ({slug}) at: '{root}'")
if args.app:
selected_app = args.app.lower()
if selected_app not in apps:
choices = ", ".join(apps.keys())
print(f"Passed app: '{selected_app}' is not found. Specify one of the following found Apps: {choices}")
sys.exit(0)
else:
selected_app = next((x for x in APP_PRIORITIES if x in apps), None)
if not selected_app:
selected_app = next(iter(apps))
print(f"\nSelected app: {selected_app}")
_, path = apps[selected_app]
if not os.path.exists(path):
print(f"Provided path doesn't exist: {path}")
else:
SCRIPT_ROOT = os.path.dirname(os.path.realpath(__file__))
create_link(f"{SCRIPT_ROOT}/../../app", path)
print("Success!")
| 2,814 |
Python
| 32.117647 | 133 | 0.562189 |
DataJuggler/DataJuggler.CameraKeys/tools/packman/config.packman.xml
|
<config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
| 211 |
XML
| 34.333328 | 123 | 0.691943 |
DataJuggler/DataJuggler.CameraKeys/tools/packman/bootstrap/install_package.py
|
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
import tempfile
import zipfile
__author__ = "hfannar"
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
class TemporaryDirectory:
def __init__(self):
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
# Remove temporary data created
shutil.rmtree(self.path)
def install_package(package_src_path, package_dst_path):
with zipfile.ZipFile(package_src_path, allowZip64=True) as zip_file, TemporaryDirectory() as temp_dir:
zip_file.extractall(temp_dir)
# Recursively copy (temp_dir will be automatically cleaned up on exit)
try:
# Recursive copy is needed because both package name and version folder could be missing in
# target directory:
shutil.copytree(temp_dir, package_dst_path)
except OSError as exc:
logger.warning("Directory %s already present, packaged installation aborted" % package_dst_path)
else:
logger.info("Package successfully installed to %s" % package_dst_path)
install_package(sys.argv[1], sys.argv[2])
| 1,844 |
Python
| 33.166666 | 108 | 0.703362 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.1.2"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["DataJuggler"]
# The title and description fields are primarily for displaying extension info in UI
title = "datajuggler camerakeys"
description="A simple python extension example to use as a starting point for your extensions."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/DataJuggler/DataJuggler.CameraKeys"
# One of categories for UI.
category = "Cameras"
# Keywords for the extension
keywords = ["kit", "cameras"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides, it will be publicly available as "import datajuggler.camerakeys".
[[python.module]]
name = "datajuggler.camerakeys"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 1,650 |
TOML
| 33.395833 | 118 | 0.753333 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/datajuggler/camerakeys/extension.py
|
import omni.ext
import omni.ui as ui
import omni.timeline
import math
from omni.kit.viewport.utility import get_active_viewport
from pxr import Sdf, Usd, UsdGeom, Gf
import omni.usd
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class DatajugglerCamerakeysExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[datajuggler.camerakeys] datajuggler camerakeys startup")
# Get the stage
stage = omni.usd.get_context().get_stage()
active_viewport = get_active_viewport()
if active_viewport:
# Pull meaningful information from the Viewport to frame a specific prim
time = active_viewport.time
camera_path = active_viewport.camera_path
else:
# Otherwise, create a camera that will be used to frame the prim_to_frame
camera_path = "/World/Camera"
UsdGeom.Camera.Define(stage, camera_path)
print(camera_path)
# Start at 100
self._MovementValue = 100
self._RotationValue = 5
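        # Default step sizes used by the movement (stage units) and rotation
        # (degrees) buttons below.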
self._label = None
self._window = ui.Window("Camera Keys", width=600, height=660)
with self._window.frame:
with ui.VStack():
label = ui.Label("Make sure your project has a camera named Camera at the World level '/World/Camera'")
self._label = label
def get_local_rot(prim: Usd.Prim):
return prim.GetAttribute("xformOp:rotateXYZ").Get()
def decompose_matrix(mat: Gf.Matrix4d):
reversed_ident_mtx = reversed(Gf.Matrix3d())
translate = mat.ExtractTranslation()
scale = Gf.Vec3d(*(v.GetLength() for v in mat.ExtractRotationMatrix()))
#must remove scaling from mtx before calculating rotations
mat.Orthonormalize()
# without reversed this seems to return angles in ZYX order
rotate = Gf.Vec3d(*reversed(mat.ExtractRotation().Decompose(*reversed_ident_mtx)))
return translate, rotate, scale
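                # The four movement handlers below share one pattern: map a unit
                # axis of camera space through the local transform to get a
                # world-space direction, scale it by the movement step, and
                # compose the resulting offset onto the camera's local transform.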
def Left_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
                    # Apply the local matrix to the start and end points of the camera's left vector (-X)
                    a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
                    b: Gf.Vec4d = Gf.Vec4d(-1,0,0,1) * local_transformation
                    # Get the vector between those two points to get the camera's current left vector
                    cam_fwd_vec = b-a
                    # Convert to Vec3 and then normalize to get unit vector
                    cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
                    # Multiply the direction vector by how far you want to move
                    # forward_step = cam_fwd_unit_vec * 100
                    forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def Forward_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
# Apply the local matrix to the start and end points of the camera's default forward vector (-Z)
a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
b: Gf.Vec4d = Gf.Vec4d(0,0,-1,1) * local_transformation
# Get the vector between those two points to get the camera's current forward vector
cam_fwd_vec = b-a
# Convert to Vec3 and then normalize to get unit vector
cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
# Multiply the forward direction vector with how far forward you want to move
# forward_step = cam_fwd_unit_vec * 100
forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def Back_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
                    # Apply the local matrix to the start and end points of the camera's backward vector (+Z)
                    a: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
                    b: Gf.Vec4d = Gf.Vec4d(0,0,1,1) * local_transformation
                    # Get the vector between those two points to get the camera's current backward vector
                    cam_fwd_vec = b-a
                    # Convert to Vec3 and then normalize to get unit vector
                    cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
                    # Multiply the direction vector by how far you want to move
                    # forward_step = cam_fwd_unit_vec * 100
                    forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def Right_Click():
stage = omni.usd.get_context().get_stage()
camera = stage.GetPrimAtPath("/World/Camera")
xform = UsdGeom.Xformable(camera)
local_transformation: Gf.Matrix4d = xform.GetLocalTransformation()
                    # Apply the local matrix to the start and end points of the camera's right vector (+X)
                    a: Gf.Vec4d = Gf.Vec4d(-1,0,0,1) * local_transformation
                    b: Gf.Vec4d = Gf.Vec4d(0,0,0,1) * local_transformation
                    # Get the vector between those two points to get the camera's current right vector
                    cam_fwd_vec = b-a
                    # Convert to Vec3 and then normalize to get unit vector
                    cam_fwd_unit_vec = Gf.Vec3d(cam_fwd_vec[:3]).GetNormalized()
                    # Multiply the direction vector by how far you want to move
                    # forward_step = cam_fwd_unit_vec * 100
                    forward_step = cam_fwd_unit_vec * self._MovementValue
# Create a new matrix with the translation that you want to perform
offset_mat = Gf.Matrix4d()
offset_mat.SetTranslate(forward_step)
# Apply the translation to the current local transform
new_transform = local_transformation * offset_mat
# Extract the new translation
translate: Gf.Vec3d = new_transform.ExtractTranslation()
# Update the attribute
camera.GetAttribute("xformOp:translate").Set(translate)
def XRotateUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationX = round(rotationX + self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(newRotationX, rotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation X = " + str(newRotationX)
def XRotateDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationX = round(rotationX - self._RotationValue,1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(newRotationX, rotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation X = " + str(newRotationX)
def YRotateUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# label.text = "Old Rotation Y = " + str(rotationY)
# calculate the new value
newRotationY = round(rotationY + self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, newRotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation Y = " + str(newRotationY)
def YRotateDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationY = round(rotationY - self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, newRotationY, rotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation Y = " + str(newRotationY)
def ZRotateUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationZ = round(rotationZ + self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, rotationY, newRotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New RotationZY = " + str(newRotationZ)
def ZRotateDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath("/World/Camera")
timeline = omni.timeline.get_timeline_interface()
current_frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
xForm = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xForm.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
# local_rotate = get_local_rot(camera)
rotationX = round(decomposed_Transform[1][0], 1)
rotationY = round(decomposed_Transform[1][1], 1)
rotationZ = round(decomposed_Transform[1][2], 1)
# calculate the new value
newRotationZ = round(rotationZ - self._RotationValue, 1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path('/World/Camera.xformOp:rotateYXZ'),
value=Gf.Vec3f(rotationX, rotationY, newRotationZ),
prev=Gf.Vec3f(rotationX, rotationY, rotationZ))
label.text = "New Rotation Y = " + str(newRotationZ)
def XAxisDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
# set the new transformX value
newTransformX = transformX - self._MovementValue
# display the new result
label.text = "The Camera object was moved down on the X Axis to " + str(round(newTransformX, 1))
# move the camera down
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(newTransformX, transformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def XAxisUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
# set the new transformX value
newTransformX = transformX + self._MovementValue
# display the new result
label.text = "The Camera object was moved up on the X Axis to " + str(round(newTransformX, 1))
# move the camera up
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(newTransformX, transformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def YAxisUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
# set the new transformY value
newTransformY = transformY + self._MovementValue
# display the new result
label.text = "The Camera object was moved up on the Y Axis to " + str(round(newTransformY, 1))
# move the camera up
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, newTransformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def YAxisDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
# set the new transformY value
newTransformY = transformY - self._MovementValue
# display the new result
label.text = "The Camera object was moved down on the Y Axis to " + str(round(newTransformY, 1))
# move the camera down
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, newTransformY, transformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def ZAxisDown_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
# set the new transformZ value
newTransformZ = transformZ - self._MovementValue
# display the new result
label.text = "The Camera object was moved down on the Z Axis to " + str(round(newTransformZ, 1))
# move the camera down
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, transformY, newTransformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def ZAxisUp_Click():
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
active_viewport = get_active_viewport()
camera_path = active_viewport.camera_path
camera = stage.GetPrimAtPath(camera_path)
xform = UsdGeom.Xformable(camera)
local_transform: Gf.Matrix4d = xform.GetLocalTransformation()
decomposed_Transform = decompose_matrix(local_transform)
transformX = round(decomposed_Transform[0][0], 1)
transformY = round(decomposed_Transform[0][1], 1)
transformZ = round(decomposed_Transform[0][2], 1)
# set the new transformZ value
newTransformZ = transformZ + self._MovementValue
# display the new result
label.text = "The Camera object was moved up on the Z Axis to " + str(round(newTransformZ, 1))
# move the camera up
omni.kit.commands.execute('ChangeProperty',prop_path=Sdf.Path('/World/Camera.xformOp:translate'),
value=Gf.Vec3d(transformX, transformY, newTransformZ),
prev=Gf.Vec3d(transformX, transformY, transformZ))
def SetKeys_Click():
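# each SetAnimCurveKeys path targets a single animation-curve channel;
# the '|x' style suffix selects the per-axis component (an assumption
# based on how the command is invoked here)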
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:translate|x'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:translate|y'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:translate|z'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:rotateYXZ|x'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:rotateYXZ|y'])
omni.kit.commands.execute('SetAnimCurveKeys',
paths=['/World/Camera.xformOp:rotateYXZ|z'])
timeline = omni.timeline.get_timeline_interface()
time = timeline.get_current_time()
fps = timeline.get_time_codes_per_seconds()
frame = time * fps
label.text = "6 Keys Were Set at frame " + str(frame)
# add an IntSlider for rotation strength
ui.Label("Camera Rotation Amount")
self._rotationSlider = ui.IntSlider(min = 1, max = 90, step=5)
self._rotationSlider.model.set_value(5)
self._RotationValue = 5
self._rotationSlider.model.add_value_changed_fn(self._onrotation_value_changed)
with ui.HStack(height=40):
xAxisButtonUp = ui.Button("X +", clicked_fn=XRotateUp_Click)
yAxisButtonUp = ui.Button("Y +", clicked_fn=YRotateUp_Click)
zAxisButtonUp = ui.Button("Z +", clicked_fn=ZRotateUp_Click)
with ui.HStack(height=40):
xAxisButtonDown = ui.Button("X -", clicked_fn=XRotateDown_Click)
yAxisButtonDown = ui.Button("Y -", clicked_fn=YRotateDown_Click)
zAxisButtonDown = ui.Button("Z -", clicked_fn=ZRotateDown_Click)
# add an IntSlider for movement strength
ui.Label("Camera Movement Amount")
self._movementSlider = ui.IntSlider(min = 10, max = 1000, step=10)
self._movementSlider.model.set_value(100)
self._MovementValue = 100
self._movementSlider.model.add_value_changed_fn(self._on_value_changed)
with ui.HStack(height=54):
leftButton = ui.Button("Left", clicked_fn=Left_Click)
forwardButton = ui.Button("Forward", clicked_fn=Forward_Click)
yAxisButtonUp = ui.Button("Back", clicked_fn=Back_Click)
rightButton = ui.Button("Right", clicked_fn=Right_Click)
with ui.HStack(height=54):
xAxisButtonUp = ui.Button("X +", clicked_fn=XAxisUp_Click)
yAxisButtonUp = ui.Button("Y +", clicked_fn=YAxisUp_Click)
zAxisButtonUp = ui.Button("Z +", clicked_fn=ZAxisUp_Click)
with ui.HStack(height=54):
xAxisButtonDown = ui.Button("X -", clicked_fn=XAxisDown_Click)
yAxisButtonDown = ui.Button("Y -", clicked_fn=YAxisDown_Click)
zAxisButtonDown = ui.Button("Z -", clicked_fn=ZAxisDown_Click)
# with ui.VStack(height=54):
# ui.Label("Shaky Cam Movement Amount - Only Applies To Forward")
# # add an IntSlider for translate Strength
# self._ShakySlider = ui.IntSlider(min = 1, max = 100, step=1)
# self._ShakySlider.model.set_value(0)
# self._ShakyValue = 0
# self._ShakySlider.model.add_value_changed_fn(self._on_shakyvalue_changed)
with ui.VStack(height=40):
ui.Label("")
ui.Label("Change the timeline to the desired frame before clicking the Set Keys button.")
with ui.VStack(height=60):
ui.Button("Set Keys", clicked_fn=SetKeys_Click)
def _on_value_changed(self, model: ui.SimpleIntModel):
self._MovementValue = model.get_value_as_int()
self._label.text = "Camera movement value = " + str(self._MovementValue)
def _onrotation_value_changed(self, model: ui.SimpleIntModel):
self._RotationValue = model.get_value_as_int()
self._label.text = "Camera rotation value = " + str(self._Rotation_Value)
#def _on_shakyvalue_changed(self, model: ui.SimpleIntModel):
# self._ShakyValue = model.get_value_as_int()
# self._label.text = "Camera shaky value = " + str(self._Shaky_Value)
def on_shutdown(self):
print("[datajuggler.camerakeys] datajuggler camerakeys shutdown")
| 32,636 |
Python
| 53.304492 | 119 | 0.527362 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/datajuggler/camerakeys/__init__.py
|
from .extension import *
| 25 |
Python
| 11.999994 | 24 | 0.76 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/datajuggler/camerakeys/tests/__init__.py
|
from .test_hello_world import *
| 31 |
Python
| 30.999969 | 31 | 0.774194 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/datajuggler/camerakeys/tests/test_hello_world.py
|
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add support for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
# Extension for writing UI tests (simulate UI interaction)
import omni.kit.ui_test as ui_test
# Import the extension python module we are testing with an absolute import path, as if we were an external user (another extension)
import datajuggler.camerakeys
# Having a test class derived from omni.kit.test.AsyncTestCase declared at the root of the module will make it auto-discoverable by omni.kit.test
class Test(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
pass
# After running each test
async def tearDown(self):
pass
# Actual test, notice it is "async" function, so "await" can be used if needed
async def test_hello_public_function(self):
result = datajuggler.camerakeys.some_public_function(4)
self.assertEqual(result, 256)
async def test_window_button(self):
# Find a label in our window
label = ui_test.find("My Window//Frame/**/Label[*]")
# Find buttons in our window
add_button = ui_test.find("My Window//Frame/**/Button[*].text=='Add'")
reset_button = ui_test.find("My Window//Frame/**/Button[*].text=='Reset'")
# Click reset button
await reset_button.click()
self.assertEqual(label.widget.text, "empty")
await add_button.click()
self.assertEqual(label.widget.text, "count: 1")
await add_button.click()
self.assertEqual(label.widget.text, "count: 2")
| 1,680 |
Python
| 34.765957 | 142 | 0.684524 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/docs/CHANGELOG.md
|
# Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.1.1] - 2021-08-01
- I changed how the x+, x-, y+, y-, z+ and z- button click events work, as omni.usd.utils no longer exists in Composer 2023.1.1.
## [1.0.0] - 2021-04-26
- Initial version of extension UI template with a window
| 331 |
Markdown
| 29.181816 | 128 | 0.676737 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/docs/README.md
|
Camera Keys is an Omniverse Python extension to move the camera around and set all 6 camera keyframes at the current frame (Transform x,y,z and Rotation x,y,z).
<img src=https://github.com/DataJuggler/SharedRepo/blob/master/Shared/Images/CameraKeysExtension.png width=460 height=320>
To use this extension, you must have a camera named Camera at '/World/Camera'. Future versions may work with multiple cameras.
The extension is designed to help you easily create camera fly-throughs. Move your camera into position either manually or using the buttons shown above, and the transform keys are set at the current frame.
Thanks to @mati-codes for writing the camera move-forward code and for helping me with many questions.
Change the frame in the timeline to the desired frame, then move the camera into place and click 'Set Keys' to set the keyframes.
Use the rotation and movement amount sliders to set how far the camera moves or rotates on each button click.
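For scripting, the following minimal sketch (an assumption: it mirrors what the 'Set Keys' button does, run from Omniverse's script editor with a camera at '/World/Camera') sets all six keyframes at the current frame:

```python
import omni.kit.commands
import omni.timeline

# set one key per transform channel at the current time
# (the same 'SetAnimCurveKeys' command the extension's button uses)
for channel in ["translate|x", "translate|y", "translate|z",
                "rotateYXZ|x", "rotateYXZ|y", "rotateYXZ|z"]:
    omni.kit.commands.execute('SetAnimCurveKeys',
                              paths=[f'/World/Camera.xformOp:{channel}'])

timeline = omni.timeline.get_timeline_interface()
frame = timeline.get_current_time() * timeline.get_time_codes_per_seconds()
print(f"6 keys set at frame {frame}")
```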
| 982 |
Markdown
| 41.739129 | 208 | 0.789206 |
DataJuggler/DataJuggler.CameraKeys/exts/datajuggler.camerakeys/docs/index.rst
|
datajuggler.camerakeys
#############################
Example of a Python-only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule::"datajuggler.camerakeys"
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 345 |
reStructuredText
| 15.47619 | 43 | 0.631884 |
isaac-sim/IsaacLab/pyproject.toml
|
[tool.isort]
py_version = 310
line_length = 120
group_by_package = true
# Files to skip
skip_glob = ["docs/*", "logs/*", "_isaac_sim/*", ".vscode/*"]
# Order of imports
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"ASSETS_FIRSTPARTY",
"FIRSTPARTY",
"EXTRA_FIRSTPARTY",
"LOCALFOLDER",
]
# Extra standard libraries considered as part of python (permissive licenses)
extra_standard_library = [
"numpy",
"h5py",
"open3d",
"torch",
"tensordict",
"bpy",
"matplotlib",
"gymnasium",
"gym",
"scipy",
"hid",
"yaml",
"prettytable",
"toml",
"trimesh",
"tqdm",
]
# Imports from Isaac Sim and Omniverse
known_third_party = [
"omni.isaac.core",
"omni.replicator.isaac",
"omni.replicator.core",
"pxr",
"omni.kit.*",
"warp",
"carb",
]
# Imports from this repository
known_first_party = "omni.isaac.lab"
known_assets_firstparty = "omni.isaac.lab_assets"
known_extra_firstparty = [
"omni.isaac.lab_tasks"
]
# Imports from the local folder
known_local_folder = "config"
[tool.pyright]
include = ["source/extensions", "source/standalone"]
exclude = [
"**/__pycache__",
"**/_isaac_sim",
"**/docs",
"**/logs",
".git",
".vscode",
]
typeCheckingMode = "basic"
pythonVersion = "3.10"
pythonPlatform = "Linux"
enableTypeIgnoreComments = true
# This is required as the CI pre-commit does not download the modules (e.g. numpy, torch, prettytable)
# Therefore, we have to ignore missing imports
reportMissingImports = "none"
# This is required to ignore type checks for modules with missing stubs.
reportMissingModuleSource = "none" # -> most common: prettytable in mdp managers
reportGeneralTypeIssues = "none" # -> raises 218 errors (usage of literal MISSING in dataclasses)
reportOptionalMemberAccess = "warning" # -> raises 8 errors
reportPrivateUsage = "warning"
[tool.codespell]
skip = '*.usd,*.svg,*.png,_isaac_sim*,*.bib,*.css,*/_build'
quiet-level = 0
# the world list should always have words in lower case
ignore-words-list = "haa,slq,collapsable"
# todo: this is a hack to deal with the incorrect spelling of "Environment" in the Isaac Sim grid world asset
exclude-file = "source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/from_files/from_files.py"
| 2,304 |
TOML
| 23.521276 | 103 | 0.664497 |
isaac-sim/IsaacLab/CONTRIBUTING.md
|
# Contribution Guidelines
Isaac Lab is a community-maintained project. We wholeheartedly welcome contributions to the project to make
the framework more mature and useful for everyone. These may take the form of bug reports, feature requests,
design proposals and more.
For general information on how to contribute see
<https://isaac-sim.github.io/IsaacLab/source/refs/contributing.html>.
| 393 |
Markdown
| 42.777773 | 110 | 0.816794 |
isaac-sim/IsaacLab/CONTRIBUTORS.md
|
# Isaac Lab Developers and Contributors
This is the official list of Isaac Lab Project developers and contributors.
To see the full list of contributors, please check the revision history in the source control.
Guidelines for modifications:
* Please keep the lists sorted alphabetically.
* Names should be added to this file as: *individual names* or *organizations*.
* E-mail addresses are tracked elsewhere to avoid spam.
## Developers
* Boston Dynamics AI Institute, Inc.
* ETH Zurich
* NVIDIA Corporation & Affiliates
* University of Toronto
---
* David Hoeller
* Farbod Farshidian
* Hunter Hansen
* James Smith
* James Tigue
* Kelly Guo
* Mayank Mittal
* Nikita Rudin
* Pascal Roth
## Contributors
* Anton Bjørndahl Mortensen
* Alice Zhou
* Andrej Orsula
* Antonio Serrano-Muñoz
* Arjun Bhardwaj
* Calvin Yu
* Chenyu Yang
* Jia Lin Yuan
* Jingzhou Liu
* Kourosh Darvish
* Lorenz Wellhausen
* Muhong Guo
* Nuralem Abizov
* Özhan Özen
* Qinxi Yu
* René Zurbrügg
* Ritvik Singh
* Rosario Scalise
* Shafeef Omar
* Vladimir Fokow
## Acknowledgements
* Ajay Mandlekar
* Animesh Garg
* Buck Babich
* Gavriel State
* Hammad Mazhar
* Marco Hutter
* Yunrong Guo
| 1,169 |
Markdown
| 17.28125 | 94 | 0.757913 |
isaac-sim/IsaacLab/README.md
|

---
# Isaac Lab
[](https://docs.omniverse.nvidia.com/isaacsim/latest/overview.html)
[](https://docs.python.org/3/whatsnew/3.10.html)
[](https://releases.ubuntu.com/20.04/)
[](https://pre-commit.com/)
[](https://isaac-sim.github.io/IsaacLab)
[](https://opensource.org/licenses/BSD-3-Clause)
**Isaac Lab** is a unified and modular framework for robot learning that aims to simplify common workflows
in robotics research (such as RL, learning from demonstrations, and motion planning). It is built upon
[NVIDIA Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/overview.html) to leverage the latest
simulation capabilities for photo-realistic scenes and fast and accurate simulation.
Please refer to our [documentation page](https://isaac-sim.github.io/IsaacLab) to learn more about the
installation steps, features, tutorials, and how to set up your project with Isaac Lab.
## Announcements
* [17.04.2024] [**v0.3.0**](https://github.com/isaac-sim/IsaacLab/releases/tag/v0.3.0):
Several improvements and bug fixes to the framework. Includes cabinet opening and dexterous manipulation environments,
terrain-aware patch sampling, and animation recording.
* [22.12.2023] [**v0.2.0**](https://github.com/isaac-sim/IsaacLab/releases/tag/v0.2.0):
Significant breaking updates to enhance the modularity and user-friendliness of the framework. Also includes
procedural terrain generation, warp-based custom ray-casters, and legged-locomotion environments.
## Contributing to Isaac Lab
We wholeheartedly welcome contributions from the community to make this framework mature and useful for everyone.
These may happen as bug reports, feature requests, or code contributions. For details, please check our
[contribution guidelines](https://isaac-sim.github.io/IsaacLab/source/refs/contributing.html).
## Troubleshooting
Please see the [troubleshooting](https://isaac-sim.github.io/IsaacLab/source/refs/troubleshooting.html) section for
common fixes or [submit an issue](https://github.com/isaac-sim/IsaacLab/issues).
For issues related to Isaac Sim, we recommend checking its [documentation](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html)
or opening a question on its [forums](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/67).
## Support
* Please use GitHub [Discussions](https://github.com/isaac-sim/IsaacLab/discussions) for discussing ideas, asking questions, and requests for new features.
* Github [Issues](https://github.com/isaac-sim/IsaacLab/issues) should only be used to track executable pieces of work with a definite scope and a clear deliverable. These can be fixing bugs, documentation issues, new features, or general updates.
## Acknowledgement
NVIDIA Isaac Sim is available freely under [individual license](https://www.nvidia.com/en-us/omniverse/download/). For more information about its license terms, please check [here](https://docs.omniverse.nvidia.com/app_isaacsim/common/NVIDIA_Omniverse_License_Agreement.html#software-support-supplement).
The Isaac Lab framework is released under [BSD-3 License](LICENSE). The license files of its dependencies and assets are present in the [`docs/licenses`](docs/licenses) directory.
| 3,717 |
Markdown
| 64.228069 | 304 | 0.781544 |
isaac-sim/IsaacLab/tools/tests_to_skip.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# The following tests are skipped by run_tests.py
TESTS_TO_SKIP = [
# lab
"test_argparser_launch.py", # app.close issue
"test_build_simulation_context_nonheadless.py", # headless
"test_env_var_launch.py", # app.close issue
"test_kwarg_launch.py", # app.close issue
"test_differential_ik.py", # Failing
# lab_tasks
"test_data_collector.py", # Failing
"test_record_video.py", # Failing
]
| 556 |
Python
| 29.944443 | 63 | 0.672662 |
isaac-sim/IsaacLab/tools/run_all_tests.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""A runner script for all the tests within source directory.
.. code-block:: bash
./isaaclab.sh -p tools/run_all_tests.py
# for dry run
./isaaclab.sh -p tools/run_all_tests.py --discover_only
# for quiet run
./isaaclab.sh -p tools/run_all_tests.py --quiet
# for increasing timeout (default is 1200 seconds)
./isaaclab.sh -p tools/run_all_tests.py --timeout 1000
"""
import argparse
import logging
import os
import subprocess
import sys
import time
from datetime import datetime
from pathlib import Path
from prettytable import PrettyTable
# Tests to skip
from tests_to_skip import TESTS_TO_SKIP
ISAACLAB_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
"""Path to the root directory of the Isaac Lab repository."""
def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description="Run all tests under the given test directory.")
# add arguments
parser.add_argument(
"--skip_tests",
default="",
help="Space separated list of tests to skip in addition to those in tests_to_skip.py.",
type=str,
nargs="*",
)
# configure default test directory (source directory)
default_test_dir = os.path.join(ISAACLAB_PATH, "source")
parser.add_argument(
"--test_dir", type=str, default=default_test_dir, help="Path to the directory containing the tests."
)
# configure default logging path based on time stamp
log_file_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".log"
default_log_path = os.path.join(ISAACLAB_PATH, "logs", "test_results", log_file_name)
parser.add_argument(
"--log_path", type=str, default=default_log_path, help="Path to the log file to store the results in."
)
parser.add_argument("--discover_only", action="store_true", help="Only discover and print tests, don't run them.")
parser.add_argument("--quiet", action="store_true", help="Don't print to console, only log to file.")
parser.add_argument("--timeout", type=int, default=1200, help="Timeout for each test in seconds.")
# parse arguments
args = parser.parse_args()
return args
def test_all(
test_dir: str,
tests_to_skip: list[str],
log_path: str,
timeout: float = 1200.0,
discover_only: bool = False,
quiet: bool = False,
) -> bool:
"""Run all tests under the given directory.
Args:
test_dir: Path to the directory containing the tests.
tests_to_skip: List of tests to skip.
log_path: Path to the log file to store the results in.
timeout: Timeout for each test in seconds. Defaults to 1200 seconds (20 minutes).
discover_only: If True, only discover and print the tests without running them. Defaults to False.
quiet: If False, print the output of the tests to the terminal console (in addition to the log file).
Defaults to False.
Returns:
True if all un-skipped tests pass or `discover_only` is True. Otherwise, False.
Raises:
ValueError: If any test to skip is not found under the given `test_dir`.
"""
# Create the log directory if it doesn't exist
os.makedirs(os.path.dirname(log_path), exist_ok=True)
# Add file handler to log to file
logging_handlers = [logging.FileHandler(log_path)]
# We also want to print to console
if not quiet:
logging_handlers.append(logging.StreamHandler())
# Set up logger
logging.basicConfig(level=logging.INFO, format="%(message)s", handlers=logging_handlers)
# Discover all tests under the given test directory
all_test_paths = [str(path) for path in Path(test_dir).resolve().rglob("*test_*.py")]
skipped_test_paths = []
test_paths = []
# Check that all tests to skip are actually in the tests
for test_to_skip in tests_to_skip:
for test_path in all_test_paths:
if test_to_skip in test_path:
break
else:
raise ValueError(f"Test to skip '{test_to_skip}' not found in tests.")
# Remove tests to skip from the list of tests to run
if len(tests_to_skip) != 0:
for test_path in all_test_paths:
if any(test_to_skip in test_path for test_to_skip in tests_to_skip):
skipped_test_paths.append(test_path)
else:
test_paths.append(test_path)
else:
test_paths = all_test_paths
# Sort test paths so they're always in the same order
all_test_paths.sort()
test_paths.sort()
skipped_test_paths.sort()
# Print tests to be run
logging.info("\n" + "=" * 60 + "\n")
logging.info(f"The following {len(all_test_paths)} tests were found:")
for i, test_path in enumerate(all_test_paths):
logging.info(f"{i + 1:02d}: {test_path}")
logging.info("\n" + "=" * 60 + "\n")
logging.info(f"The following {len(skipped_test_paths)} tests are marked to be skipped:")
for i, test_path in enumerate(skipped_test_paths):
logging.info(f"{i + 1:02d}: {test_path}")
logging.info("\n" + "=" * 60 + "\n")
# Exit if only discovering tests
if discover_only:
return True
results = {}
# Run each script and store results
for test_path in test_paths:
results[test_path] = {}
before = time.time()
logging.info("\n" + "-" * 60 + "\n")
logging.info(f"[INFO] Running '{test_path}'\n")
try:
completed_process = subprocess.run(
[sys.executable, test_path], check=True, capture_output=True, timeout=timeout
)
except subprocess.TimeoutExpired as e:
logging.error(f"Timeout occurred: {e}")
result = "TIMEDOUT"
stdout = e.stdout
stderr = e.stderr
except subprocess.CalledProcessError as e:
# When check=True is passed to subprocess.run() above, CalledProcessError is raised if the process returns a
# non-zero exit code. The caveat is that returncode is not correctly updated in this case, so we simply
# catch the exception and mark this test as FAILED.
result = "FAILED"
stdout = e.stdout
stderr = e.stderr
except Exception as e:
logging.error(f"Unexpected exception {e}. Please report this issue on the repository.")
result = "FAILED"
# a generic exception carries no stdout/stderr attributes
stdout = None
stderr = None
else:
# Should only get here if the process ran successfully, e.g. no exceptions were raised
# but we still check the returncode just in case
result = "PASSED" if completed_process.returncode == 0 else "FAILED"
stdout = completed_process.stdout
stderr = completed_process.stderr
after = time.time()
time_elapsed = after - before
# Decode stdout and stderr and write to file and print to console if desired
stdout_str = stdout.decode("utf-8") if stdout is not None else ""
stderr_str = stderr.decode("utf-8") if stderr is not None else ""
# Write to log file
logging.info(stdout_str)
logging.info(stderr_str)
logging.info(f"[INFO] Time elapsed: {time_elapsed:.2f} s")
logging.info(f"[INFO] Result '{test_path}': {result}")
# Collect results
results[test_path]["time_elapsed"] = time_elapsed
results[test_path]["result"] = result
# Calculate the number and percentage of passing tests
num_tests = len(all_test_paths)
num_passing = len([test_path for test_path in test_paths if results[test_path]["result"] == "PASSED"])
num_failing = len([test_path for test_path in test_paths if results[test_path]["result"] == "FAILED"])
num_timing_out = len([test_path for test_path in test_paths if results[test_path]["result"] == "TIMEDOUT"])
num_skipped = len(skipped_test_paths)
if num_tests == 0:
passing_percentage = 100
else:
passing_percentage = (num_passing + num_skipped) / num_tests * 100
# Print summaries of test results
summary_str = "\n\n"
summary_str += "===================\n"
summary_str += "Test Result Summary\n"
summary_str += "===================\n"
summary_str += f"Total: {num_tests}\n"
summary_str += f"Passing: {num_passing}\n"
summary_str += f"Failing: {num_failing}\n"
summary_str += f"Skipped: {num_skipped}\n"
summary_str += f"Timing Out: {num_timing_out}\n"
summary_str += f"Passing Percentage: {passing_percentage:.2f}%\n"
# Print time elapsed in hours, minutes, seconds
total_time = sum([results[test_path]["time_elapsed"] for test_path in test_paths])
summary_str += f"Total Time Elapsed: {total_time // 3600}h"
summary_str += f"{total_time // 60 % 60}m"
summary_str += f"{total_time % 60:.2f}s"
summary_str += "\n\n=======================\n"
summary_str += "Per Test Result Summary\n"
summary_str += "=======================\n"
# Construct table of results per test
per_test_result_table = PrettyTable(field_names=["Test Path", "Result", "Time (s)"])
per_test_result_table.align["Test Path"] = "l"
per_test_result_table.align["Time (s)"] = "r"
for test_path in test_paths:
per_test_result_table.add_row(
[test_path, results[test_path]["result"], f"{results[test_path]['time_elapsed']:0.2f}"]
)
for test_path in skipped_test_paths:
per_test_result_table.add_row([test_path, "SKIPPED", "N/A"])
summary_str += per_test_result_table.get_string()
# Print summary to console and log file
logging.info(summary_str)
# Only count failing and timing out tests towards failure
return num_failing + num_timing_out == 0
if __name__ == "__main__":
# parse command line arguments
args = parse_args()
# add tests to skip to the list of tests to skip
tests_to_skip = TESTS_TO_SKIP
tests_to_skip += args.skip_tests
# run all tests
test_success = test_all(
test_dir=args.test_dir,
tests_to_skip=tests_to_skip,
log_path=args.log_path,
timeout=args.timeout,
discover_only=args.discover_only,
quiet=args.quiet,
)
# update exit status based on all tests passing or not
if not test_success:
exit(1)
| 10,467 |
Python
| 36.519713 | 120 | 0.626827 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/pyproject.toml
|
[build-system]
requires = ["setuptools", "wheel", "toml"]
build-backend = "setuptools.build_meta"
| 98 |
TOML
| 23.749994 | 42 | 0.704082 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/setup.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Installation script for the 'omni.isaac.lab_assets' python package."""
import os
import toml
from setuptools import setup
# Obtain the extension data from the extension.toml file
EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__))
# Read the extension.toml file
EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml"))
# Installation operation
setup(
name="omni-isaac-lab_assets",
author="Isaac Lab Project Developers",
maintainer="Isaac Lab Project Developers",
url=EXTENSION_TOML_DATA["package"]["repository"],
version=EXTENSION_TOML_DATA["package"]["version"],
description=EXTENSION_TOML_DATA["package"]["description"],
keywords=EXTENSION_TOML_DATA["package"]["keywords"],
include_package_data=True,
python_requires=">=3.10",
packages=["omni.isaac.lab_assets"],
classifiers=[
"Natural Language :: English",
"Programming Language :: Python :: 3.10",
"Isaac Sim :: 4.0.0",
"Isaac Sim :: 2023.1.1",
],
zip_safe=False,
)
| 1,180 |
Python
| 30.078947 | 89 | 0.687288 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/test/test_valid_configs.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch the simulator
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import unittest
import omni.isaac.lab_assets as lab_assets # noqa: F401
from omni.isaac.lab.assets import AssetBase, AssetBaseCfg
from omni.isaac.lab.sensors import SensorBase, SensorBaseCfg
from omni.isaac.lab.sim import build_simulation_context
class TestValidEntitiesConfigs(unittest.TestCase):
"""Test cases for all registered entities configurations."""
@classmethod
def setUpClass(cls):
# load all registered entities configurations from the module
cls.registered_entities: dict[str, AssetBaseCfg | SensorBaseCfg] = {}
# inspect all classes from the module
for obj_name in dir(lab_assets):
obj = getattr(lab_assets, obj_name)
# store all registered entities configurations
if isinstance(obj, (AssetBaseCfg, SensorBaseCfg)):
cls.registered_entities[obj_name] = obj
# print all existing entities names
print(">>> All registered entities:", list(cls.registered_entities.keys()))
"""
Test fixtures.
"""
def test_asset_configs(self):
"""Check all registered asset configurations."""
# iterate over all registered assets
for asset_name, entity_cfg in self.registered_entities.items():
for device in ("cuda:0", "cpu"):
with self.subTest(asset_name=asset_name, device=device):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
# print the asset name
print(f">>> Testing entity {asset_name} on device {device}")
# name the prim path
entity_cfg.prim_path = "/World/asset"
# create the asset / sensors
entity: AssetBase | SensorBase = entity_cfg.class_type(entity_cfg) # type: ignore
# play the sim
sim.reset()
# check asset is initialized successfully
self.assertTrue(entity._is_initialized)
if __name__ == "__main__":
run_tests()
| 2,547 |
Python
| 34.388888 | 106 | 0.628583 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.3"
# Description
title = "Isaac Lab Assets"
description="Extension containing configuration instances of different assets and sensors"
readme = "docs/README.md"
repository = "https://github.com/isaac-sim/IsaacLab"
category = "robotics"
keywords = ["kit", "robotics", "assets", "isaaclab"]
[dependencies]
"omni.isaac.lab" = {}
# Main python module this extension provides.
[[python.module]]
name = "omni.isaac.lab_assets"
| 502 |
TOML
| 25.473683 | 90 | 0.729084 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/unitree.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for Unitree robots.
The following configurations are available:
* :obj:`UNITREE_A1_CFG`: Unitree A1 robot with DC motor model for the legs
* :obj:`UNITREE_GO1_CFG`: Unitree Go1 robot with actuator net model for the legs
* :obj:`UNITREE_GO2_CFG`: Unitree Go2 robot with DC motor model for the legs
* :obj:`H1_CFG`: H1 humanoid robot
Reference: https://github.com/unitreerobotics/unitree_ros
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ActuatorNetMLPCfg, DCMotorCfg, ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
##
# Configuration - Actuators.
##
GO1_ACTUATOR_CFG = ActuatorNetMLPCfg(
joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
network_file=f"{ISAACLAB_NUCLEUS_DIR}/ActuatorNets/Unitree/unitree_go1.pt",
pos_scale=-1.0,
vel_scale=1.0,
torque_scale=1.0,
input_order="pos_vel",
input_idx=[0, 1, 2],
effort_limit=23.7, # taken from spec sheet
velocity_limit=30.0, # taken from spec sheet
saturation_effort=23.7, # same as effort limit
)
"""Configuration of Go1 actuators using MLP model.
Actuator specifications: https://shop.unitree.com/products/go1-motor
This model is taken from: https://github.com/Improbable-AI/walk-these-ways
"""
##
# Configuration
##
UNITREE_A1_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/A1/a1.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.42),
joint_pos={
".*L_hip_joint": 0.1,
".*R_hip_joint": -0.1,
"F[L,R]_thigh_joint": 0.8,
"R[L,R]_thigh_joint": 1.0,
".*_calf_joint": -1.5,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"base_legs": DCMotorCfg(
joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
effort_limit=33.5,
saturation_effort=33.5,
velocity_limit=21.0,
stiffness=25.0,
damping=0.5,
friction=0.0,
),
},
)
"""Configuration of Unitree A1 using DC motor.
Note: Specifications taken from: https://www.trossenrobotics.com/a1-quadruped#specifications
"""
UNITREE_GO1_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/Go1/go1.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.4),
joint_pos={
".*L_hip_joint": 0.1,
".*R_hip_joint": -0.1,
"F[L,R]_thigh_joint": 0.8,
"R[L,R]_thigh_joint": 1.0,
".*_calf_joint": -1.5,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"base_legs": GO1_ACTUATOR_CFG,
},
)
"""Configuration of Unitree Go1 using MLP-based actuator model."""
UNITREE_GO2_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/Go2/go2.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.4),
joint_pos={
".*L_hip_joint": 0.1,
".*R_hip_joint": -0.1,
"F[L,R]_thigh_joint": 0.8,
"R[L,R]_thigh_joint": 1.0,
".*_calf_joint": -1.5,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"base_legs": DCMotorCfg(
joint_names_expr=[".*_hip_joint", ".*_thigh_joint", ".*_calf_joint"],
effort_limit=23.5,
saturation_effort=23.5,
velocity_limit=30.0,
stiffness=25.0,
damping=0.5,
friction=0.0,
),
},
)
"""Configuration of Unitree Go2 using DC-Motor actuator model."""
H1_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/H1/h1.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=4
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 1.05),
joint_pos={
".*_hip_yaw": 0.0,
".*_hip_roll": 0.0,
".*_hip_pitch": -0.28, # -16 degrees
".*_knee": 0.79, # 45 degrees
".*_ankle": -0.52, # -30 degrees
"torso": 0.0,
".*_shoulder_pitch": 0.28,
".*_shoulder_roll": 0.0,
".*_shoulder_yaw": 0.0,
".*_elbow": 0.52,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"legs": ImplicitActuatorCfg(
joint_names_expr=[".*_hip_yaw", ".*_hip_roll", ".*_hip_pitch", ".*_knee", "torso"],
effort_limit=300,
velocity_limit=100.0,
stiffness={
".*_hip_yaw": 150.0,
".*_hip_roll": 150.0,
".*_hip_pitch": 200.0,
".*_knee": 200.0,
"torso": 200.0,
},
damping={
".*_hip_yaw": 5.0,
".*_hip_roll": 5.0,
".*_hip_pitch": 5.0,
".*_knee": 5.0,
"torso": 5.0,
},
),
"feet": ImplicitActuatorCfg(
joint_names_expr=[".*_ankle"],
effort_limit=100,
velocity_limit=100.0,
stiffness={".*_ankle": 20.0},
damping={".*_ankle": 4.0},
),
"arms": ImplicitActuatorCfg(
joint_names_expr=[".*_shoulder_pitch", ".*_shoulder_roll", ".*_shoulder_yaw", ".*_elbow"],
effort_limit=300,
velocity_limit=100.0,
stiffness={
".*_shoulder_pitch": 40.0,
".*_shoulder_roll": 40.0,
".*_shoulder_yaw": 40.0,
".*_elbow": 40.0,
},
damping={
".*_shoulder_pitch": 10.0,
".*_shoulder_roll": 10.0,
".*_shoulder_yaw": 10.0,
".*_elbow": 10.0,
},
),
},
)
"""Configuration for the Unitree H1 Humanoid robot."""
H1_MINIMAL_CFG = H1_CFG.copy()
H1_MINIMAL_CFG.spawn.usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/H1/h1_minimal.usd"
"""Configuration for the Unitree H1 Humanoid robot with fewer collision meshes.
This configuration removes most collision meshes to speed up simulation.
"""
| 8,821 |
Python
| 31.91791 | 111 | 0.557306 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/shadow_hand.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the dexterous hand from Shadow Robot.
The following configurations are available:
* :obj:`SHADOW_HAND_CFG`: Shadow Hand with implicit actuator model.
Reference:
* https://www.shadowrobot.com/dexterous-hand-series/
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators.actuator_cfg import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
SHADOW_HAND_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ShadowHand/shadow_hand_instanceable.usd",
activate_contact_sensors=False,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=True,
retain_accelerations=True,
max_depenetration_velocity=1000.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True,
solver_position_iteration_count=8,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.0005,
),
# collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0),
joint_drive_props=sim_utils.JointDrivePropertiesCfg(drive_type="force"),
fixed_tendons_props=sim_utils.FixedTendonPropertiesCfg(limit_stiffness=30.0, damping=0.1),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.5),
rot=(0.0, 0.0, -0.7071, 0.7071),
joint_pos={".*": 0.0},
),
actuators={
"fingers": ImplicitActuatorCfg(
joint_names_expr=["robot0_WR.*", "robot0_(FF|MF|RF|LF|TH)J(3|2|1)", "robot0_(LF|TH)J4", "robot0_THJ0"],
effort_limit={
"robot0_WRJ1": 4.785,
"robot0_WRJ0": 2.175,
"robot0_(FF|MF|RF|LF)J1": 0.7245,
"robot0_FFJ(3|2)": 0.9,
"robot0_MFJ(3|2)": 0.9,
"robot0_RFJ(3|2)": 0.9,
"robot0_LFJ(4|3|2)": 0.9,
"robot0_THJ4": 2.3722,
"robot0_THJ3": 1.45,
"robot0_THJ(2|1)": 0.99,
"robot0_THJ0": 0.81,
},
stiffness={
"robot0_WRJ.*": 5.0,
"robot0_(FF|MF|RF|LF|TH)J(3|2|1)": 1.0,
"robot0_(LF|TH)J4": 1.0,
"robot0_THJ0": 1.0,
},
damping={
"robot0_WRJ.*": 0.5,
"robot0_(FF|MF|RF|LF|TH)J(3|2|1)": 0.1,
"robot0_(LF|TH)J4": 0.1,
"robot0_THJ0": 0.1,
},
),
},
soft_joint_pos_limit_factor=1.0,
)
"""Configuration of Shadow Hand robot."""
| 2,914 |
Python
| 32.895348 | 115 | 0.570007 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/sawyer.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Rethink Robotics arms.
The following configuration parameters are available:
* :obj:`SAWYER_CFG`: The Sawyer arm without any tool attached.
Reference: https://github.com/RethinkRobotics/sawyer_robot
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
SAWYER_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/RethinkRobotics/sawyer_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
),
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
"head_pan": 0.0,
"right_j0": 0.0,
"right_j1": -0.785,
"right_j2": 0.0,
"right_j3": 1.05,
"right_j4": 0.0,
"right_j5": 1.3,
"right_j6": 0.0,
},
),
actuators={
"head": ImplicitActuatorCfg(
joint_names_expr=["head_pan"],
velocity_limit=100.0,
effort_limit=8.0,
stiffness=800.0,
damping=40.0,
),
"arm": ImplicitActuatorCfg(
joint_names_expr=["right_j[0-6]"],
velocity_limit=100.0,
effort_limit={
"right_j[0-1]": 80.0,
"right_j[2-3]": 40.0,
"right_j[4-6]": 9.0,
},
stiffness=100.0,
damping=4.0,
),
},
)
"""Configuration of Rethink Robotics Sawyer arm."""
| 2,077 |
Python
| 28.685714 | 110 | 0.589793 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Package containing asset and sensor configurations."""
import os
import toml
# Conveniences to other module directories via relative paths
ISAACLAB_ASSETS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""
ISAACLAB_ASSETS_DATA_DIR = os.path.join(ISAACLAB_ASSETS_EXT_DIR, "data")
"""Path to the extension data directory."""
ISAACLAB_ASSETS_METADATA = toml.load(os.path.join(ISAACLAB_ASSETS_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""
# Configure the module-level variables
__version__ = ISAACLAB_ASSETS_METADATA["package"]["version"]
##
# Configuration for different assets.
##
from .allegro import *
from .ant import *
from .anymal import *
from .cartpole import *
from .franka import *
from .humanoid import *
from .kinova import *
from .quadcopter import *
from .ridgeback_franka import *
from .sawyer import *
from .shadow_hand import *
from .unitree import *
from .universal_robots import *
| 1,175 |
Python
| 26.999999 | 103 | 0.737872 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/ridgeback_franka.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Ridgeback-Manipulation robots.
The following configurations are available:
* :obj:`RIDGEBACK_FRANKA_PANDA_CFG`: Clearpath Ridgeback base with Franka Emika arm
Reference: https://github.com/ridgeback/ridgeback_manipulation
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
RIDGEBACK_FRANKA_PANDA_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Clearpath/RidgebackFranka/ridgeback_franka.usd",
articulation_props=sim_utils.ArticulationRootPropertiesCfg(enabled_self_collisions=False),
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
# base
"dummy_base_prismatic_y_joint": 0.0,
"dummy_base_prismatic_x_joint": 0.0,
"dummy_base_revolute_z_joint": 0.0,
# franka arm
"panda_joint1": 0.0,
"panda_joint2": -0.569,
"panda_joint3": 0.0,
"panda_joint4": -2.810,
"panda_joint5": 0.0,
"panda_joint6": 2.0,
"panda_joint7": 0.741,
# tool
"panda_finger_joint.*": 0.035,
},
joint_vel={".*": 0.0},
),
actuators={
"base": ImplicitActuatorCfg(
joint_names_expr=["dummy_base_.*"],
velocity_limit=100.0,
effort_limit=1000.0,
stiffness=0.0,
damping=1e5,
),
"panda_shoulder": ImplicitActuatorCfg(
joint_names_expr=["panda_joint[1-4]"],
effort_limit=87.0,
velocity_limit=100.0,
stiffness=800.0,
damping=40.0,
),
"panda_forearm": ImplicitActuatorCfg(
joint_names_expr=["panda_joint[5-7]"],
effort_limit=12.0,
velocity_limit=100.0,
stiffness=800.0,
damping=40.0,
),
"panda_hand": ImplicitActuatorCfg(
joint_names_expr=["panda_finger_joint.*"],
effort_limit=200.0,
velocity_limit=0.2,
stiffness=1e5,
damping=1e3,
),
},
)
"""Configuration of Franka arm with Franka Hand on a Clearpath Ridgeback base using implicit actuator models.
The following control configuration is used:
* Base: velocity control
* Arm: position control with damping
* Hand: position control with damping
"""
| 2,734 |
Python
| 29.730337 | 109 | 0.603146 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/cassie.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for Agility robots.
The following configurations are available:
* :obj:`CASSIE_CFG`: Agility Cassie robot with simple PD controller for the legs
Reference: https://github.com/UMich-BipedLab/Cassie_Model/blob/master/urdf/cassie.urdf
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
##
# Configuration
##
CASSIE_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Agility/Cassie/cassie.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.9),
joint_pos={
"hip_abduction_left": 0.1,
"hip_rotation_left": 0.0,
"hip_flexion_left": 1.0,
"thigh_joint_left": -1.8,
"ankle_joint_left": 1.57,
"toe_joint_left": -1.57,
"hip_abduction_right": -0.1,
"hip_rotation_right": 0.0,
"hip_flexion_right": 1.0,
"thigh_joint_right": -1.8,
"ankle_joint_right": 1.57,
"toe_joint_right": -1.57,
},
joint_vel={".*": 0.0},
),
soft_joint_pos_limit_factor=0.9,
actuators={
"legs": ImplicitActuatorCfg(
joint_names_expr=["hip_.*", "thigh_.*", "ankle_.*"],
effort_limit=200.0,
velocity_limit=10.0,
stiffness={
"hip_abduction.*": 100.0,
"hip_rotation.*": 100.0,
"hip_flexion.*": 200.0,
"thigh_joint.*": 200.0,
"ankle_joint.*": 200.0,
},
damping={
"hip_abduction.*": 3.0,
"hip_rotation.*": 3.0,
"hip_flexion.*": 6.0,
"thigh_joint.*": 6.0,
"ankle_joint.*": 6.0,
},
),
"toes": ImplicitActuatorCfg(
joint_names_expr=["toe_.*"],
effort_limit=20.0,
velocity_limit=10.0,
stiffness={
"toe_joint.*": 20.0,
},
damping={
"toe_joint.*": 1.0,
},
),
},
)
| 2,954 |
Python
| 30.774193 | 110 | 0.537238 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/humanoid.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Mujoco Humanoid robot."""
from __future__ import annotations
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
HUMANOID_CFG = ArticulationCfg(
prim_path="{ENV_REGEX_NS}/Robot",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=None,
max_depenetration_velocity=10.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
copy_from_source=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 1.34),
joint_pos={".*": 0.0},
),
actuators={
"body": ImplicitActuatorCfg(
joint_names_expr=[".*"],
stiffness={
".*_waist.*": 20.0,
".*_upper_arm.*": 10.0,
"pelvis": 10.0,
".*_lower_arm": 2.0,
".*_thigh:0": 10.0,
".*_thigh:1": 20.0,
".*_thigh:2": 10.0,
".*_shin": 5.0,
".*_foot.*": 2.0,
},
damping={
".*_waist.*": 5.0,
".*_upper_arm.*": 5.0,
"pelvis": 5.0,
".*_lower_arm": 1.0,
".*_thigh:0": 5.0,
".*_thigh:1": 5.0,
".*_thigh:2": 5.0,
".*_shin": 0.1,
".*_foot.*": 1.0,
},
),
},
)
"""Configuration for the Mujoco Humanoid robot."""
| 2,132 |
Python
| 29.471428 | 82 | 0.513133 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/universal_robots.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Universal Robots.
The following configuration parameters are available:
* :obj:`UR10_CFG`: The UR10 arm without a gripper.
Reference: https://github.com/ros-industrial/universal_robot
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
##
# Configuration
##
UR10_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/UniversalRobots/UR10/ur10_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
"shoulder_pan_joint": 0.0,
"shoulder_lift_joint": -1.712,
"elbow_joint": 1.712,
"wrist_1_joint": 0.0,
"wrist_2_joint": 0.0,
"wrist_3_joint": 0.0,
},
),
actuators={
"arm": ImplicitActuatorCfg(
joint_names_expr=[".*"],
velocity_limit=100.0,
effort_limit=87.0,
stiffness=800.0,
damping=40.0,
),
},
)
"""Configuration of UR-10 arm using implicit actuator models."""
| 1,532 |
Python
| 26.872727 | 93 | 0.63577 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/franka.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Franka Emika robots.
The following configurations are available:
* :obj:`FRANKA_PANDA_CFG`: Franka Emika Panda robot with Panda hand
* :obj:`FRANKA_PANDA_HIGH_PD_CFG`: Franka Emika Panda robot with Panda hand with stiffer PD control
Reference: https://github.com/frankaemika/franka_ros
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
##
# Configuration
##
FRANKA_PANDA_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd",
activate_contact_sensors=False,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
),
# collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0),
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
"panda_joint1": 0.0,
"panda_joint2": -0.569,
"panda_joint3": 0.0,
"panda_joint4": -2.810,
"panda_joint5": 0.0,
"panda_joint6": 3.037,
"panda_joint7": 0.741,
"panda_finger_joint.*": 0.04,
},
),
actuators={
"panda_shoulder": ImplicitActuatorCfg(
joint_names_expr=["panda_joint[1-4]"],
effort_limit=87.0,
velocity_limit=2.175,
stiffness=80.0,
damping=4.0,
),
"panda_forearm": ImplicitActuatorCfg(
joint_names_expr=["panda_joint[5-7]"],
effort_limit=12.0,
velocity_limit=2.61,
stiffness=80.0,
damping=4.0,
),
"panda_hand": ImplicitActuatorCfg(
joint_names_expr=["panda_finger_joint.*"],
effort_limit=200.0,
velocity_limit=0.2,
stiffness=2e3,
damping=1e2,
),
},
soft_joint_pos_limit_factor=1.0,
)
"""Configuration of Franka Emika Panda robot."""
FRANKA_PANDA_HIGH_PD_CFG = FRANKA_PANDA_CFG.copy()
FRANKA_PANDA_HIGH_PD_CFG.spawn.rigid_props.disable_gravity = True
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_shoulder"].stiffness = 400.0
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_shoulder"].damping = 80.0
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_forearm"].stiffness = 400.0
FRANKA_PANDA_HIGH_PD_CFG.actuators["panda_forearm"].damping = 80.0
"""Configuration of Franka Emika Panda robot with stiffer PD control.
This configuration is useful for task-space control using differential IK.
"""
| 3,026 |
Python
| 33.397727 | 110 | 0.64805 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/ant.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Mujoco Ant robot."""
from __future__ import annotations
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
ANT_CFG = ArticulationCfg(
prim_path="{ENV_REGEX_NS}/Robot",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Ant/ant_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=10.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
copy_from_source=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.5),
joint_pos={
".*_leg": 0.0,
"front_left_foot": 0.785398, # 45 degrees
"front_right_foot": -0.785398,
"left_back_foot": -0.785398,
"right_back_foot": 0.785398,
},
),
actuators={
"body": ImplicitActuatorCfg(
joint_names_expr=[".*"],
stiffness=0.0,
damping=0.0,
),
},
)
"""Configuration for the Mujoco Ant robot."""
| 1,659 |
Python
| 28.642857 | 72 | 0.613623 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/cartpole.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for a simple Cartpole robot."""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
##
# Configuration
##
CARTPOLE_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Classic/Cartpole/cartpole.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
rigid_body_enabled=True,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=100.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 2.0), joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0}
),
actuators={
"cart_actuator": ImplicitActuatorCfg(
joint_names_expr=["slider_to_cart"],
effort_limit=400.0,
velocity_limit=100.0,
stiffness=0.0,
damping=10.0,
),
"pole_actuator": ImplicitActuatorCfg(
joint_names_expr=["cart_to_pole"], effort_limit=400.0, velocity_limit=100.0, stiffness=0.0, damping=0.0
),
},
)
"""Configuration for a simple Cartpole robot."""
| 1,738 |
Python
| 31.81132 | 115 | 0.638665 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/allegro.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Allegro Hand robots from Wonik Robotics.
The following configurations are available:
* :obj:`ALLEGRO_HAND_CFG`: Allegro Hand with implicit actuator model.
Reference:
* https://www.wonikrobotics.com/robot-hand
"""
import math
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators.actuator_cfg import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
ALLEGRO_HAND_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/AllegroHand/allegro_hand_instanceable.usd",
activate_contact_sensors=False,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=True,
retain_accelerations=False,
enable_gyroscopic_forces=False,
angular_damping=0.01,
max_linear_velocity=1000.0,
max_angular_velocity=64 / math.pi * 180.0,
max_depenetration_velocity=1000.0,
max_contact_impulse=1e32,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True,
solver_position_iteration_count=8,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.0005,
),
# collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.5),
rot=(0.257551, 0.283045, 0.683330, -0.621782),
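        # negative lookahead: every joint except the thumb root starts at zero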
joint_pos={"^(?!thumb_joint_0).*": 0.0, "thumb_joint_0": 0.28},
),
actuators={
"fingers": ImplicitActuatorCfg(
joint_names_expr=[".*"],
effort_limit=0.5,
velocity_limit=100.0,
stiffness=3.0,
damping=0.1,
friction=0.01,
),
},
soft_joint_pos_limit_factor=1.0,
)
"""Configuration of Allegro Hand robot."""
| 2,181 |
Python
| 29.732394 | 98 | 0.644658 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/kinova.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Kinova Robotics arms.
The following configuration parameters are available:
* :obj:`KINOVA_JACO2_N7S300_CFG`: The Kinova JACO2 (7-Dof) arm with a 3-finger gripper.
* :obj:`KINOVA_JACO2_N6S300_CFG`: The Kinova JACO2 (6-Dof) arm with a 3-finger gripper.
* :obj:`KINOVA_GEN3_N7_CFG`: The Kinova Gen3 (7-Dof) arm with no gripper.
Reference: https://github.com/Kinovarobotics/kinova-ros
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
KINOVA_JACO2_N7S300_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Kinova/Jaco2/J2N7S300/j2n7s300_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
),
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
"j2n7s300_joint_1": 0.0,
"j2n7s300_joint_2": 2.76,
"j2n7s300_joint_3": 0.0,
"j2n7s300_joint_4": 2.0,
"j2n7s300_joint_5": 2.0,
"j2n7s300_joint_6": 0.0,
"j2n7s300_joint_7": 0.0,
"j2n7s300_joint_finger_[1-3]": 0.2, # close: 1.2, open: 0.2
"j2n7s300_joint_finger_tip_[1-3]": 0.2, # close: 1.2, open: 0.2
},
),
actuators={
"arm": ImplicitActuatorCfg(
joint_names_expr=[".*_joint_[1-7]"],
velocity_limit=100.0,
effort_limit={
".*_joint_[1-2]": 80.0,
".*_joint_[3-4]": 40.0,
".*_joint_[5-7]": 20.0,
},
stiffness={
".*_joint_[1-4]": 40.0,
".*_joint_[5-7]": 15.0,
},
damping={
".*_joint_[1-4]": 1.0,
".*_joint_[5-7]": 0.5,
},
),
"gripper": ImplicitActuatorCfg(
joint_names_expr=[".*_finger_[1-3]", ".*_finger_tip_[1-3]"],
velocity_limit=100.0,
effort_limit=2.0,
stiffness=1.2,
damping=0.01,
),
},
)
"""Configuration of Kinova JACO2 (7-Dof) arm with 3-finger gripper."""
KINOVA_JACO2_N6S300_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Kinova/Jaco2/J2N6S300/j2n6s300_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
),
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
"j2n6s300_joint_1": 0.0,
"j2n6s300_joint_2": 2.76,
"j2n6s300_joint_3": 2.76,
"j2n6s300_joint_4": 2.5,
"j2n6s300_joint_5": 2.0,
"j2n6s300_joint_6": 0.0,
"j2n6s300_joint_finger_[1-3]": 0.2, # close: 1.2, open: 0.2
"j2n6s300_joint_finger_tip_[1-3]": 0.2, # close: 1.2, open: 0.2
},
),
actuators={
"arm": ImplicitActuatorCfg(
joint_names_expr=[".*_joint_[1-6]"],
velocity_limit=100.0,
effort_limit={
".*_joint_[1-2]": 80.0,
".*_joint_3": 40.0,
".*_joint_[4-6]": 20.0,
},
stiffness={
".*_joint_[1-3]": 40.0,
".*_joint_[4-6]": 15.0,
},
damping={
".*_joint_[1-3]": 1.0,
".*_joint_[4-6]": 0.5,
},
),
"gripper": ImplicitActuatorCfg(
joint_names_expr=[".*_finger_[1-3]", ".*_finger_tip_[1-3]"],
velocity_limit=100.0,
effort_limit=2.0,
stiffness=1.2,
damping=0.01,
),
},
)
"""Configuration of Kinova JACO2 (6-Dof) arm with 3-finger gripper."""
KINOVA_GEN3_N7_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Kinova/Gen3/gen3n7_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
),
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
"joint_1": 0.0,
"joint_2": 0.65,
"joint_3": 0.0,
"joint_4": 1.89,
"joint_5": 0.0,
"joint_6": 0.6,
"joint_7": -1.57,
},
),
actuators={
"arm": ImplicitActuatorCfg(
joint_names_expr=["joint_[1-7]"],
velocity_limit=100.0,
effort_limit={
"joint_[1-4]": 39.0,
"joint_[5-7]": 9.0,
},
stiffness={
"joint_[1-4]": 40.0,
"joint_[5-7]": 15.0,
},
damping={
"joint_[1-4]": 1.0,
"joint_[5-7]": 0.5,
},
),
},
)
"""Configuration of Kinova Gen3 (7-Dof) arm with no gripper."""
| 5,943 |
Python
| 32.393258 | 110 | 0.52566 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/quadcopter.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the quadcopters"""
from __future__ import annotations
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
CRAZYFLIE_CFG = ArticulationCfg(
prim_path="{ENV_REGEX_NS}/Robot",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Crazyflie/cf2x.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=10.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
copy_from_source=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.5),
joint_pos={
".*": 0.0,
},
joint_vel={
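            # rotors spin in alternating directions (counter-rotating pairs)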
"m1_joint": 200.0,
"m2_joint": -200.0,
"m3_joint": 200.0,
"m4_joint": -200.0,
},
),
actuators={
"dummy": ImplicitActuatorCfg(
joint_names_expr=[".*"],
stiffness=0.0,
damping=0.0,
),
},
)
"""Configuration for the Crazyflie quadcopter."""
| 1,631 |
Python
| 27.137931 | 67 | 0.602085 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/anymal.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the ANYbotics robots.
The following configuration parameters are available:
* :obj:`ANYMAL_B_CFG`: The ANYmal-B robot with ANYdrives 3.0
* :obj:`ANYMAL_C_CFG`: The ANYmal-C robot with ANYdrives 3.0
* :obj:`ANYMAL_D_CFG`: The ANYmal-D robot with ANYdrives 3.0
Reference:
* https://github.com/ANYbotics/anymal_b_simple_description
* https://github.com/ANYbotics/anymal_c_simple_description
* https://github.com/ANYbotics/anymal_d_simple_description
"""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ActuatorNetLSTMCfg, DCMotorCfg
from omni.isaac.lab.assets.articulation import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
##
# Configuration - Actuators.
##
ANYDRIVE_3_SIMPLE_ACTUATOR_CFG = DCMotorCfg(
joint_names_expr=[".*HAA", ".*HFE", ".*KFE"],
saturation_effort=120.0,
effort_limit=80.0,
velocity_limit=7.5,
stiffness={".*": 40.0},
damping={".*": 5.0},
)
"""Configuration for ANYdrive 3.x with DC actuator model."""
ANYDRIVE_3_LSTM_ACTUATOR_CFG = ActuatorNetLSTMCfg(
joint_names_expr=[".*HAA", ".*HFE", ".*KFE"],
network_file=f"{ISAACLAB_NUCLEUS_DIR}/ActuatorNets/ANYbotics/anydrive_3_lstm_jit.pt",
saturation_effort=120.0,
effort_limit=80.0,
velocity_limit=7.5,
)
"""Configuration for ANYdrive 3.0 (used on ANYmal-C) with LSTM actuator model."""
##
# Configuration - Articulation.
##
ANYMAL_B_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-B/anymal_b.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
# collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.6),
joint_pos={
".*HAA": 0.0, # all HAA
".*F_HFE": 0.4, # both front HFE
".*H_HFE": -0.4, # both hind HFE
".*F_KFE": -0.8, # both front KFE
".*H_KFE": 0.8, # both hind KFE
},
),
actuators={"legs": ANYDRIVE_3_LSTM_ACTUATOR_CFG},
soft_joint_pos_limit_factor=0.95,
)
"""Configuration of ANYmal-B robot using actuator-net."""
ANYMAL_C_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd",
# usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
# collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.6),
joint_pos={
".*HAA": 0.0, # all HAA
".*F_HFE": 0.4, # both front HFE
".*H_HFE": -0.4, # both hind HFE
".*F_KFE": -0.8, # both front KFE
".*H_KFE": 0.8, # both hind KFE
},
),
actuators={"legs": ANYDRIVE_3_LSTM_ACTUATOR_CFG},
soft_joint_pos_limit_factor=0.95,
)
"""Configuration of ANYmal-C robot using actuator-net."""
ANYMAL_D_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd",
# usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d_minimal.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
),
# collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.6),
joint_pos={
".*HAA": 0.0, # all HAA
".*F_HFE": 0.4, # both front HFE
".*H_HFE": -0.4, # both hind HFE
".*F_KFE": -0.8, # both front KFE
".*H_KFE": 0.8, # both hind KFE
},
),
actuators={"legs": ANYDRIVE_3_LSTM_ACTUATOR_CFG},
soft_joint_pos_limit_factor=0.95,
)
"""Configuration of ANYmal-D robot using actuator-net.
Note:
Since we don't have a publicly available actuator network for ANYmal-D, we use the same network as ANYmal-C.
This may impact the sim-to-real transfer performance.
"""
| 5,811 |
Python
| 34.656442 | 112 | 0.63311 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/docs/CHANGELOG.rst
|
Changelog
---------
0.1.2 (2024-04-03)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added configurations for different arms from Kinova Robotics and Rethink Robotics.
0.1.1 (2024-03-11)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Added configurations for allegro and shadow hand assets.
0.1.0 (2023-12-20)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Moved all assets' configuration from ``omni.isaac.lab`` to ``omni.isaac.lab_assets`` extension.
| 423 |
reStructuredText
| 13.620689 | 97 | 0.586288 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/docs/README.md
|
# Isaac Lab: Assets for Robots and Objects
This extension contains configurations for various assets and sensors. The configuration instances are
used to spawn and configure the corresponding assets in the simulation. They are passed to their
respective classes during construction.
## Organizing custom assets
For Isaac Lab, we primarily store assets on the Omniverse Nucleus server. However, it may at times be
necessary to store assets locally (for example, for debugging). In such cases, the extension's `data`
directory can be used for temporary hosting of assets.
Inside the `data` directory, we recommend following the same structure as our Nucleus directory
`Isaac/IsaacLab`. This makes it seamless to move these assets to the Nucleus server later.
The recommended directory structure inside `data` is as follows:
* **`Robots/<Company-Name>/<Robot-Name>`**: The USD files should be inside `<Robot-Name>` directory with
the name of the robot.
* **`Props/<Prop-Type>/<Prop-Name>`**: The USD files should be inside `<Prop-Name>` directory with the name
of the prop. This includes mounts, objects and markers.
* **`ActuatorNets/<Company-Name>`**: The actuator networks should be inside the `<Company-Name>` directory,
named after the actuator that they model.
* **`Policies/<Task-Name>`**: The policy should be JIT/ONNX compiled with the name `policy.pt`. The directory
should also contain the parameters used for training the checkpoint, to ensure reproducibility.
* **`Test/<Test-Name>`**: Assets used for unit-testing purposes.
## Referring to the assets in your code
You can use the following snippet to refer to the assets:
```python
from omni.isaac.lab_assets import ISAACLAB_ASSETS_DATA_DIR
# ANYmal-C
ANYMAL_C_USD_PATH = f"{ISAACLAB_ASSETS_DATA_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd"
# ANYmal-D
ANYMAL_D_USD_PATH = f"{ISAACLAB_ASSETS_DATA_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd"
```
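The same constants can feed directly into spawn configurations. For example (an illustrative
sketch; it assumes the ANYmal-C USD file exists locally under `data`):

```python
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab_assets import ISAACLAB_ASSETS_DATA_DIR

# Spawn the locally hosted robot USD at a prim in the scene.
robot_cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAACLAB_ASSETS_DATA_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd")
robot_cfg.func("/World/Robot", robot_cfg)
```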
| 1,913 |
Markdown
| 44.571428 | 110 | 0.764245 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/pyproject.toml
|
[build-system]
requires = ["setuptools", "wheel", "toml"]
build-backend = "setuptools.build_meta"
| 98 |
TOML
| 23.749994 | 42 | 0.704082 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/setup.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Installation script for the 'omni.isaac.lab' python package."""
import os
import toml
from setuptools import setup
# Obtain the extension data from the extension.toml file
EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__))
# Read the extension.toml file
EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml"))
# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
# generic
"numpy",
"torch>=2.2.2",
"prettytable==3.3.0",
"tensordict",
"toml",
# devices
"hidapi",
# gym
"gymnasium==0.29.0",
# procedural-generation
"trimesh",
"pyglet<2",
]
# Installation operation
setup(
name="omni-isaac-lab",
author="Isaac Lab Project Developers",
maintainer="Isaac Lab Project Developers",
url=EXTENSION_TOML_DATA["package"]["repository"],
version=EXTENSION_TOML_DATA["package"]["version"],
description=EXTENSION_TOML_DATA["package"]["description"],
keywords=EXTENSION_TOML_DATA["package"]["keywords"],
license="BSD-3-Clause",
include_package_data=True,
python_requires=">=3.10",
install_requires=INSTALL_REQUIRES,
packages=["omni.isaac.lab"],
classifiers=[
"Natural Language :: English",
"Programming Language :: Python :: 3.10",
"Isaac Sim :: 4.0.0",
"Isaac Sim :: 2023.1.1",
],
zip_safe=False,
)
| 1,529 |
Python
| 25.842105 | 89 | 0.661871 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/devices/check_keyboard.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows how to use a teleoperation device with Isaac Sim.
The teleoperation device is a keyboard that allows the user to control the robot.
It is possible to add additional callbacks to it for user-defined operations.
"""
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher
# launch omniverse app
app_launcher = AppLauncher()
simulation_app = app_launcher.app
"""Rest everything follows."""
import ctypes
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.lab.devices import Se3Keyboard
def print_cb():
"""Dummy callback function executed when the key 'L' is pressed."""
print("Print callback")
def quit_cb():
"""Dummy callback function executed when the key 'ESC' is pressed."""
print("Quit callback")
simulation_app.close()
def main():
# Load kit helper
sim = SimulationContext(physics_dt=0.01, rendering_dt=0.01)
# Create teleoperation interface
teleop_interface = Se3Keyboard(pos_sensitivity=0.1, rot_sensitivity=0.1)
# Add teleoperation callbacks
# available key buttons: https://docs.omniverse.nvidia.com/kit/docs/carbonite/latest/docs/python/carb.html?highlight=keyboardeventtype#carb.input.KeyboardInput
teleop_interface.add_callback("L", print_cb)
teleop_interface.add_callback("ESCAPE", quit_cb)
print("Press 'L' to print a message. Press 'ESC' to quit.")
    # Check that the teleoperation interface is bound to a single instance (via its reference count)
if ctypes.c_long.from_address(id(teleop_interface)).value != 1:
raise RuntimeError("Teleoperation interface is not bounded to a single instance.")
# Reset interface internals
teleop_interface.reset()
# Play simulation
sim.reset()
# Simulate
while simulation_app.is_running():
# If simulation is stopped, then exit.
if sim.is_stopped():
break
# If simulation is paused, then skip.
if not sim.is_playing():
sim.step()
continue
# get keyboard command
delta_pose, gripper_command = teleop_interface.advance()
# print command
if gripper_command:
print(f"Gripper command: {gripper_command}")
# step simulation
sim.step()
# check if simulator is stopped
if sim.is_stopped():
break
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 2,589 |
Python
| 27.461538 | 163 | 0.679413 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/check_contact_sensor.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to use the contact sensor sensor in Isaac Lab.
.. code-block:: bash
    ./isaaclab.sh -p source/extensions/omni.isaac.lab/test/sensors/check_contact_sensor.py --num_robots 2
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Contact Sensor Test Script")
parser.add_argument("--num_robots", type=int, default=64, help="Number of robots to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.carb import set_carb_setting
from omni.isaac.core.utils.viewports import set_camera_view
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.sensors.contact_sensor import ContactSensor, ContactSensorCfg
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort:skip
"""
Helpers
"""
def design_scene():
"""Add prims to the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.SphereLightCfg()
cfg.func("/World/Light/GreySphere", cfg, translation=(4.5, 3.5, 10.0))
cfg.func("/World/Light/WhiteSphere", cfg, translation=(-4.5, 3.5, 10.0))
"""
Main
"""
def main():
"""Spawns the ANYmal robot and clones it using Isaac Sim Cloner API."""
# Load kit helper
sim = SimulationContext(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cuda:0")
# Set main camera
set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Enable hydra scene-graph instancing
# this is needed to visualize the scene when flatcache is enabled
set_carb_setting(sim._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
cloner.define_base_env("/World/envs")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.define_prim("/World/envs/env_0")
# Clone the scene
num_envs = args_cli.num_robots
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
_ = cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
# Design props
design_scene()
# Spawn things into the scene
robot_cfg = ANYMAL_C_CFG.replace(prim_path="/World/envs/env_.*/Robot")
robot_cfg.spawn.activate_contact_sensors = True
robot = Articulation(cfg=robot_cfg)
# Contact sensor
contact_sensor_cfg = ContactSensorCfg(
prim_path="/World/envs/env_.*/Robot/.*_SHANK", track_air_time=True, debug_vis=not args_cli.headless
)
contact_sensor = ContactSensor(cfg=contact_sensor_cfg)
# filter collisions within each environment instance
physics_scene_path = sim.get_physics_context().prim_path
cloner.filter_collisions(
physics_scene_path, "/World/collisions", envs_prim_paths, global_paths=["/World/defaultGroundPlane"]
)
# Play the simulator
sim.reset()
# print info
print(contact_sensor)
# Now we are ready!
print("[INFO]: Setup complete...")
# Define simulation stepping
decimation = 4
physics_dt = sim.get_physics_dt()
sim_dt = decimation * physics_dt
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if sim.is_stopped():
break
# If simulation is paused, then skip.
if not sim.is_playing():
sim.step(render=False)
continue
# reset
if count % 1000 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
robot.reset()
# perform 4 steps
for _ in range(decimation):
# apply actions
robot.set_joint_position_target(robot.data.default_joint_pos)
# write commands to sim
robot.write_data_to_sim()
# perform step
sim.step()
# fetch data
robot.update(physics_dt)
# update sim-time
sim_time += sim_dt
count += 1
# update the buffers
if sim.is_playing():
contact_sensor.update(sim_dt, force_recompute=True)
if count % 100 == 0:
print("Sim-time: ", sim_time)
print("Number of contacts: ", torch.count_nonzero(contact_sensor.data.current_air_time == 0.0).item())
print("-" * 80)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 5,432 |
Python
| 30.224138 | 118 | 0.654087 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/check_ray_caster.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows how to use the ray caster from the Isaac Lab framework.
.. code-block:: bash
# Usage
    ./isaaclab.sh -p source/extensions/omni.isaac.lab/test/sensors/check_ray_caster.py --headless
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Ray Caster Test Script")
parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to clone.")
parser.add_argument(
"--terrain_type",
type=str,
default="generator",
help="Type of terrain to import. Can be 'generator' or 'usd' or 'plane'.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.viewports import set_camera_view
import omni.isaac.lab.sim as sim_utils
import omni.isaac.lab.terrains as terrain_gen
from omni.isaac.lab.sensors.ray_caster import RayCaster, RayCasterCfg, patterns
from omni.isaac.lab.terrains.config.rough import ROUGH_TERRAINS_CFG
from omni.isaac.lab.terrains.terrain_importer import TerrainImporter
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.timer import Timer
def design_scene(sim: SimulationContext, num_envs: int = 2048):
"""Design the scene."""
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
cloner.define_base_env("/World/envs")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.define_prim("/World/envs/env_0")
# Define the scene
# -- Light
cfg = sim_utils.DistantLightCfg(intensity=2000)
cfg.func("/World/light", cfg)
# -- Balls
cfg = sim_utils.SphereCfg(
radius=0.25,
rigid_props=sim_utils.RigidBodyPropertiesCfg(),
mass_props=sim_utils.MassPropertiesCfg(mass=0.5),
collision_props=sim_utils.CollisionPropertiesCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
)
cfg.func("/World/envs/env_0/ball", cfg, translation=(0.0, 0.0, 5.0))
# Clone the scene
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
physics_scene_path = sim.get_physics_context().prim_path
cloner.filter_collisions(
physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"]
)
def main():
"""Main function."""
# Load kit helper
sim_params = {
"use_gpu": True,
"use_gpu_pipeline": True,
"use_flatcache": True, # deprecated from Isaac Sim 2023.1 onwards
"use_fabric": True, # used from Isaac Sim 2023.1 onwards
"enable_scene_query_support": True,
}
sim = SimulationContext(
physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, sim_params=sim_params, backend="torch", device="cuda:0"
)
# Set main camera
set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5])
# Parameters
num_envs = args_cli.num_envs
# Design the scene
design_scene(sim=sim, num_envs=num_envs)
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
terrain_type=args_cli.terrain_type,
terrain_generator=ROUGH_TERRAINS_CFG,
usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd",
max_init_terrain_level=None,
num_envs=1,
)
_ = TerrainImporter(terrain_importer_cfg)
# Create a ray-caster sensor
ray_caster_cfg = RayCasterCfg(
prim_path="/World/envs/env_.*/ball",
mesh_prim_paths=["/World/ground"],
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(1.6, 1.0)),
attach_yaw_only=True,
debug_vis=not args_cli.headless,
)
ray_caster = RayCaster(cfg=ray_caster_cfg)
# Create a view over all the balls
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
# Play simulator
sim.reset()
# Initialize the views
# -- balls
ball_view.initialize()
# Print the sensor information
print(ray_caster)
# Get the initial positions of the balls
ball_initial_positions, ball_initial_orientations = ball_view.get_world_poses()
ball_initial_velocities = ball_view.get_velocities()
# Create a counter for resetting the scene
step_count = 0
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if sim.is_stopped():
break
# If simulation is paused, then skip.
if not sim.is_playing():
sim.step(render=False)
continue
# Reset the scene
if step_count % 500 == 0:
# sample random indices to reset
reset_indices = torch.randint(0, num_envs, (num_envs // 2,))
# reset the balls
ball_view.set_world_poses(
ball_initial_positions[reset_indices], ball_initial_orientations[reset_indices], indices=reset_indices
)
ball_view.set_velocities(ball_initial_velocities[reset_indices], indices=reset_indices)
# reset the sensor
ray_caster.reset(reset_indices)
# reset the counter
step_count = 0
# Step simulation
sim.step()
# Update the ray-caster
with Timer(f"Ray-caster update with {num_envs} x {ray_caster.num_rays} rays"):
ray_caster.update(dt=sim.get_physics_dt(), force_recompute=True)
# Update counter
step_count += 1
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 6,359 |
Python
| 33.565217 | 118 | 0.665042 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/test_camera.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True, enable_cameras=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import copy
import numpy as np
import os
import random
import scipy.spatial.transform as tf
import torch
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.replicator.core as rep
from omni.isaac.core.prims import GeometryPrim, RigidPrim
from pxr import Gf, Usd, UsdGeom
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.sensors.camera import Camera, CameraCfg
from omni.isaac.lab.utils import convert_dict_to_backend
from omni.isaac.lab.utils.math import convert_quat
from omni.isaac.lab.utils.timer import Timer
# sample camera poses
POSITION = [2.5, 2.5, 2.5]
QUAT_ROS = [-0.17591989, 0.33985114, 0.82047325, -0.42470819]
QUAT_OPENGL = [0.33985113, 0.17591988, 0.42470818, 0.82047324]
QUAT_WORLD = [-0.3647052, -0.27984815, -0.1159169, 0.88047623]
class TestCamera(unittest.TestCase):
"""Test for USD Camera sensor."""
def setUp(self):
"""Create a blank new stage for each test."""
self.camera_cfg = CameraCfg(
height=128,
width=128,
prim_path="/World/Camera",
update_period=0,
data_types=["distance_to_image_plane"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
)
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.01
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=self.dt)
self.sim: sim_utils.SimulationContext = sim_utils.SimulationContext(sim_cfg)
# populate scene
self._populate_scene()
# load stage
stage_utils.update_stage()
def tearDown(self):
"""Stops simulator after each test."""
# close all the opened viewport from before.
rep.vp_manager.destroy_hydra_textures("Replicator")
# stop simulation
        # note: cannot use self.sim.stop() since it performs an extra render step after stopping, which is undesirable here
self.sim._timeline.stop()
# clear the stage
self.sim.clear_all_callbacks()
self.sim.clear_instance()
"""
Tests
"""
def test_camera_init(self):
"""Test camera initialization."""
# Create camera
camera = Camera(self.camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[0].GetPath().pathString, self.camera_cfg.prim_path)
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Check buffers that exists and have correct shapes
self.assertEqual(camera.data.pos_w.shape, (1, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (1, 4))
self.assertEqual(camera.data.quat_w_world.shape, (1, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (1, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (1, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
self.assertEqual(camera.data.info, [{self.camera_cfg.data_types[0]: None}])
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for im_data in camera.data.output.to_dict().values():
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width))
def test_camera_init_offset(self):
"""Test camera initialization with offset using different conventions."""
# define the same offset in all conventions
# -- ROS convention
cam_cfg_offset_ros = copy.deepcopy(self.camera_cfg)
cam_cfg_offset_ros.offset = CameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_ROS,
convention="ros",
)
cam_cfg_offset_ros.prim_path = "/World/CameraOffsetRos"
camera_ros = Camera(cam_cfg_offset_ros)
# -- OpenGL convention
cam_cfg_offset_opengl = copy.deepcopy(self.camera_cfg)
cam_cfg_offset_opengl.offset = CameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_OPENGL,
convention="opengl",
)
cam_cfg_offset_opengl.prim_path = "/World/CameraOffsetOpengl"
camera_opengl = Camera(cam_cfg_offset_opengl)
# -- World convention
cam_cfg_offset_world = copy.deepcopy(self.camera_cfg)
cam_cfg_offset_world.offset = CameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_WORLD,
convention="world",
)
cam_cfg_offset_world.prim_path = "/World/CameraOffsetWorld"
camera_world = Camera(cam_cfg_offset_world)
# play sim
self.sim.reset()
# retrieve camera pose using USD API
prim_tf_ros = camera_ros._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default())
prim_tf_opengl = camera_opengl._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default())
prim_tf_world = camera_world._sensor_prims[0].ComputeLocalToWorldTransform(Usd.TimeCode.Default())
# convert them from column-major to row-major
prim_tf_ros = np.transpose(prim_tf_ros)
prim_tf_opengl = np.transpose(prim_tf_opengl)
prim_tf_world = np.transpose(prim_tf_world)
# check that all transforms are set correctly
np.testing.assert_allclose(prim_tf_ros[0:3, 3], cam_cfg_offset_ros.offset.pos)
np.testing.assert_allclose(prim_tf_opengl[0:3, 3], cam_cfg_offset_opengl.offset.pos)
np.testing.assert_allclose(prim_tf_world[0:3, 3], cam_cfg_offset_world.offset.pos)
np.testing.assert_allclose(
convert_quat(tf.Rotation.from_matrix(prim_tf_ros[:3, :3]).as_quat(), "wxyz"),
cam_cfg_offset_opengl.offset.rot,
rtol=1e-5,
)
np.testing.assert_allclose(
convert_quat(tf.Rotation.from_matrix(prim_tf_opengl[:3, :3]).as_quat(), "wxyz"),
cam_cfg_offset_opengl.offset.rot,
rtol=1e-5,
)
np.testing.assert_allclose(
convert_quat(tf.Rotation.from_matrix(prim_tf_world[:3, :3]).as_quat(), "wxyz"),
cam_cfg_offset_opengl.offset.rot,
rtol=1e-5,
)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# check if transform correctly set in output
np.testing.assert_allclose(camera_ros.data.pos_w[0].cpu().numpy(), cam_cfg_offset_ros.offset.pos, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_ros[0].cpu().numpy(), QUAT_ROS, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_opengl[0].cpu().numpy(), QUAT_OPENGL, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_world[0].cpu().numpy(), QUAT_WORLD, rtol=1e-5)
def test_multi_camera_init(self):
"""Test multi-camera initialization."""
# create two cameras with different prim paths
# -- camera 1
cam_cfg_1 = copy.deepcopy(self.camera_cfg)
cam_cfg_1.prim_path = "/World/Camera_1"
cam_1 = Camera(cam_cfg_1)
# -- camera 2
cam_cfg_2 = copy.deepcopy(self.camera_cfg)
cam_cfg_2.prim_path = "/World/Camera_2"
cam_2 = Camera(cam_cfg_2)
# play sim
self.sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
cam_1.update(self.dt)
cam_2.update(self.dt)
# check image data
for cam in [cam_1, cam_2]:
for im_data in cam.data.output.to_dict().values():
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width))
def test_camera_set_world_poses(self):
"""Test camera function to set specific world pose."""
camera = Camera(self.camera_cfg)
# play sim
self.sim.reset()
# convert to torch tensors
position = torch.tensor([POSITION], dtype=torch.float32, device=camera.device)
orientation = torch.tensor([QUAT_WORLD], dtype=torch.float32, device=camera.device)
# set new pose
camera.set_world_poses(position.clone(), orientation.clone(), convention="world")
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# check if transform correctly set in output
torch.testing.assert_close(camera.data.pos_w, position)
torch.testing.assert_close(camera.data.quat_w_world, orientation)
def test_camera_set_world_poses_from_view(self):
"""Test camera function to set specific world pose from view."""
camera = Camera(self.camera_cfg)
# play sim
self.sim.reset()
# convert to torch tensors
eyes = torch.tensor([POSITION], dtype=torch.float32, device=camera.device)
targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device)
quat_ros_gt = torch.tensor([QUAT_ROS], dtype=torch.float32, device=camera.device)
# set new pose
camera.set_world_poses_from_view(eyes.clone(), targets.clone())
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# check if transform correctly set in output
torch.testing.assert_close(camera.data.pos_w, eyes)
torch.testing.assert_close(camera.data.quat_w_ros, quat_ros_gt)
def test_intrinsic_matrix(self):
"""Checks that the camera's set and retrieve methods work for intrinsic matrix."""
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.height = 240
camera_cfg.width = 320
camera = Camera(camera_cfg)
# play sim
self.sim.reset()
# Desired properties (obtained from realsense camera at 320x240 resolution)
rs_intrinsic_matrix = [229.31640625, 0.0, 164.810546875, 0.0, 229.826171875, 122.1650390625, 0.0, 0.0, 1.0]
rs_intrinsic_matrix = torch.tensor(rs_intrinsic_matrix, device=camera.device).reshape(3, 3).unsqueeze(0)
# Set matrix into simulator
camera.set_intrinsic_matrices(rs_intrinsic_matrix.clone())
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# Check that matrix is correct
# TODO: This is not correctly setting all values in the matrix since the
# vertical aperture and aperture offsets are not being set correctly
# This is a bug in the simulator.
torch.testing.assert_close(rs_intrinsic_matrix[0, 0, 0], camera.data.intrinsic_matrices[0, 0, 0])
# torch.testing.assert_close(rs_intrinsic_matrix[0, 1, 1], camera.data.intrinsic_matrices[0, 1, 1])
def test_camera_resolution_all_colorize(self):
"""Test camera resolution is correctly set for all types with colorization enabled."""
# Add all types
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.data_types = [
"rgb",
"distance_to_image_plane",
"normals",
"semantic_segmentation",
"instance_segmentation_fast",
"instance_id_segmentation_fast",
]
camera_cfg.colorize_instance_id_segmentation = True
camera_cfg.colorize_instance_segmentation = True
camera_cfg.colorize_semantic_segmentation = True
# Create camera
camera = Camera(camera_cfg)
# Play sim
self.sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
camera.update(self.dt)
# expected sizes
hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 4)
hw_1c_shape = (1, camera_cfg.height, camera_cfg.width)
# access image data and compare shapes
output = camera.data.output
self.assertEqual(output["rgb"].shape, hw_3c_shape)
self.assertEqual(output["distance_to_image_plane"].shape, hw_1c_shape)
self.assertEqual(output["normals"].shape, hw_3c_shape)
self.assertEqual(output["semantic_segmentation"].shape, hw_3c_shape)
self.assertEqual(output["instance_segmentation_fast"].shape, hw_3c_shape)
self.assertEqual(output["instance_id_segmentation_fast"].shape, hw_3c_shape)
# access image data and compare dtype
output = camera.data.output
self.assertEqual(output["rgb"].dtype, torch.uint8)
self.assertEqual(output["distance_to_image_plane"].dtype, torch.float)
self.assertEqual(output["normals"].dtype, torch.float)
self.assertEqual(output["semantic_segmentation"].dtype, torch.uint8)
self.assertEqual(output["instance_segmentation_fast"].dtype, torch.uint8)
self.assertEqual(output["instance_id_segmentation_fast"].dtype, torch.uint8)
def test_camera_resolution_no_colorize(self):
"""Test camera resolution is correctly set for all types with no colorization enabled."""
# Add all types
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.data_types = [
"rgb",
"distance_to_image_plane",
"normals",
"semantic_segmentation",
"instance_segmentation_fast",
"instance_id_segmentation_fast",
]
camera_cfg.colorize_instance_id_segmentation = False
camera_cfg.colorize_instance_segmentation = False
camera_cfg.colorize_semantic_segmentation = False
# Create camera
camera = Camera(camera_cfg)
# Play sim
self.sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(12):
self.sim.step()
camera.update(self.dt)
# expected sizes
hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 4)
hw_1c_shape = (1, camera_cfg.height, camera_cfg.width)
# access image data and compare shapes
output = camera.data.output
self.assertEqual(output["rgb"].shape, hw_3c_shape)
self.assertEqual(output["distance_to_image_plane"].shape, hw_1c_shape)
self.assertEqual(output["normals"].shape, hw_3c_shape)
self.assertEqual(output["semantic_segmentation"].shape, hw_1c_shape)
self.assertEqual(output["instance_segmentation_fast"].shape, hw_1c_shape)
self.assertEqual(output["instance_id_segmentation_fast"].shape, hw_1c_shape)
# access image data and compare dtype
output = camera.data.output
self.assertEqual(output["rgb"].dtype, torch.uint8)
self.assertEqual(output["distance_to_image_plane"].dtype, torch.float)
self.assertEqual(output["normals"].dtype, torch.float)
self.assertEqual(output["semantic_segmentation"].dtype, torch.int32)
self.assertEqual(output["instance_segmentation_fast"].dtype, torch.int32)
self.assertEqual(output["instance_id_segmentation_fast"].dtype, torch.int32)
def test_throughput(self):
"""Checks that the single camera gets created properly with a rig."""
# Create directory temp dir to dump the results
file_dir = os.path.dirname(os.path.realpath(__file__))
temp_dir = os.path.join(file_dir, "output", "camera", "throughput")
os.makedirs(temp_dir, exist_ok=True)
# Create replicator writer
rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3)
# create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.height = 480
camera_cfg.width = 640
camera = Camera(camera_cfg)
# Play simulator
self.sim.reset()
# Set camera pose
eyes = torch.tensor([[2.5, 2.5, 2.5]], dtype=torch.float32, device=camera.device)
targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device)
camera.set_world_poses_from_view(eyes, targets)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(5):
# perform rendering
self.sim.step()
# update camera
with Timer(f"Time taken for updating camera with shape {camera.image_shape}"):
camera.update(self.dt)
# Save images
with Timer(f"Time taken for writing data with shape {camera.image_shape} "):
# Pack data back into replicator format to save them using its writer
if self.sim.get_version()[0] == 4:
rep_output = {"annotators": {}}
camera_data = convert_dict_to_backend(camera.data.output[0].to_dict(), backend="numpy")
for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()):
if info is not None:
rep_output["annotators"][key] = {"render_product": {"data": data, **info}}
else:
rep_output["annotators"][key] = {"render_product": {"data": data}}
else:
rep_output = dict()
camera_data = convert_dict_to_backend(camera.data.output[0].to_dict(), backend="numpy")
for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()):
if info is not None:
rep_output[key] = {"data": data, "info": info}
else:
rep_output[key] = data
# Save images
rep_output["trigger_outputs"] = {"on_time": camera.frame[0]}
rep_writer.write(rep_output)
print("----------------------------------------")
# Check image data
for im_data in camera.data.output.values():
self.assertEqual(im_data.shape, (1, camera_cfg.height, camera_cfg.width))
"""
Helper functions.
"""
@staticmethod
def _populate_scene():
"""Add prims to the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.SphereLightCfg()
cfg.func("/World/Light/GreySphere", cfg, translation=(4.5, 3.5, 10.0))
cfg.func("/World/Light/WhiteSphere", cfg, translation=(-4.5, 3.5, 10.0))
# Random objects
random.seed(0)
for i in range(10):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
# create prim
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = prim_utils.create_prim(
f"/World/Objects/Obj_{i:02d}",
prim_type,
translation=position,
scale=(0.25, 0.25, 0.25),
semantic_label=prim_type,
)
# cast to geom prim
geom_prim = getattr(UsdGeom, prim_type)(prim)
# set random color
color = Gf.Vec3f(random.random(), random.random(), random.random())
geom_prim.CreateDisplayColorAttr()
geom_prim.GetDisplayColorAttr().Set([color])
# add rigid properties
GeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True)
RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0)
if __name__ == "__main__":
run_tests()
| 22,171 |
Python
| 42.304687 | 119 | 0.610978 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/test_frame_transformer.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script checks the FrameTransformer sensor by visualizing the frames that it creates.
"""
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import math
import scipy.spatial.transform as tf
import torch
import unittest
import omni.isaac.core.utils.stage as stage_utils
import omni.isaac.lab.sim as sim_utils
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.lab.sensors import FrameTransformerCfg, OffsetCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort:skip
def quat_from_euler_rpy(roll, pitch, yaw, degrees=False):
"""Converts Euler XYZ to Quaternion (w, x, y, z)."""
quat = tf.Rotation.from_euler("xyz", (roll, pitch, yaw), degrees=degrees).as_quat()
return tuple(quat[[3, 0, 1, 2]].tolist())
def euler_rpy_apply(rpy, xyz, degrees=False):
"""Applies rotation from Euler XYZ on position vector."""
rot = tf.Rotation.from_euler("xyz", rpy, degrees=degrees)
return tuple(rot.apply(xyz).tolist())
@configclass
class MySceneCfg(InteractiveSceneCfg):
"""Example scene configuration."""
# terrain - flat terrain plane
terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane")
# articulation - robot
robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# sensors - frame transformer (filled inside unit test)
frame_transformer: FrameTransformerCfg = None
class TestFrameTransformer(unittest.TestCase):
"""Test for frame transformer sensor."""
def setUp(self):
"""Create a blank new stage for each test."""
# Create a new stage
stage_utils.create_new_stage()
# Load kit helper
self.sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.005))
# Set main camera
self.sim.set_camera_view(eye=[5, 5, 5], target=[0.0, 0.0, 0.0])
def tearDown(self):
"""Stops simulator after each test."""
# stop simulation
# self.sim.stop()
# clear the stage
self.sim.clear_all_callbacks()
self.sim.clear_instance()
"""
Tests
"""
def test_frame_transformer_feet_wrt_base(self):
"""Test feet transformations w.r.t. base source frame.
        In this test, the source frame is the robot base. This frame is at index 0 when
        the frame bodies are sorted in the order of the regex matching in the frame transformer.
"""
# Spawn things into stage
scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False)
scene_cfg.frame_transformer = FrameTransformerCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
target_frames=[
FrameTransformerCfg.FrameCfg(
name="LF_FOOT_USER",
prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK",
offset=OffsetCfg(
pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)),
rot=quat_from_euler_rpy(0, 0, -math.pi / 2),
),
),
FrameTransformerCfg.FrameCfg(
name="RF_FOOT_USER",
prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK",
offset=OffsetCfg(
pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)),
rot=quat_from_euler_rpy(0, 0, math.pi / 2),
),
),
FrameTransformerCfg.FrameCfg(
name="LH_FOOT_USER",
prim_path="{ENV_REGEX_NS}/Robot/LH_SHANK",
offset=OffsetCfg(
pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(-0.08795, 0.01305, -0.33797)),
rot=quat_from_euler_rpy(0, 0, -math.pi / 2),
),
),
FrameTransformerCfg.FrameCfg(
name="RH_FOOT_USER",
prim_path="{ENV_REGEX_NS}/Robot/RH_SHANK",
offset=OffsetCfg(
pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(-0.08795, -0.01305, -0.33797)),
rot=quat_from_euler_rpy(0, 0, math.pi / 2),
),
),
],
)
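# note: the shank-to-foot offsets above are presumed to reproduce the ANYmal-C foot
# frames, so the "_USER" frames can be compared directly against the simulator's own
# LF/RF/LH/RH_FOOT bodies below.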
scene = InteractiveScene(scene_cfg)
# Play the simulator
self.sim.reset()
# Acquire the index of ground truth bodies
feet_indices, feet_names = scene.articulations["robot"].find_bodies(
["LF_FOOT", "RF_FOOT", "LH_FOOT", "RH_FOOT"]
)
# Check that the names are parsed in the same order
user_feet_names = [f"{name}_USER" for name in feet_names]
self.assertListEqual(scene.sensors["frame_transformer"].data.target_frame_names, user_feet_names)
# default joint targets
default_actions = scene.articulations["robot"].data.default_joint_pos.clone()
# Define simulation stepping
sim_dt = self.sim.get_physics_dt()
# Simulate physics
for count in range(100):
# reset
if count % 25 == 0:
# reset root state
root_state = scene.articulations["robot"].data.default_root_state.clone()
root_state[:, :3] += scene.env_origins
joint_pos = scene.articulations["robot"].data.default_joint_pos
joint_vel = scene.articulations["robot"].data.default_joint_vel
# -- set root state
# -- robot
scene.articulations["robot"].write_root_state_to_sim(root_state)
scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
# reset buffers
scene.reset()
# set joint targets
robot_actions = default_actions + 0.5 * torch.randn_like(default_actions)
scene.articulations["robot"].set_joint_position_target(robot_actions)
# write data to sim
scene.write_data_to_sim()
# perform step
self.sim.step()
# read data from sim
scene.update(sim_dt)
# check absolute frame transforms in world frame
# -- ground-truth
root_pose_w = scene.articulations["robot"].data.root_state_w[:, :7]
feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices]
feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices]
# -- frame transformer
source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w
source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w
feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w
feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w
# check if they are the same
torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, rtol=1e-3, atol=1e-3)
# check if the relative transforms are the same
feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source
feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source
for index in range(len(feet_indices)):
# ground-truth
foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms(
root_pose_w[:, :3], root_pose_w[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index]
)
# check if they are the same
torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3)
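# note: `subtract_frame_transforms(p_s, q_s, p_t, q_t)` is expected to return the target
# pose expressed in the source frame, i.e. p = q_s^-1 * (p_t - p_s) and q = q_s^-1 * q_t,
# which is what the sensor reports in `target_pos_source` and `target_quat_source`.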
def test_frame_transformer_feet_wrt_thigh(self):
"""Test feet transformation w.r.t. thigh source frame.
In this test, the source frame is the LF leg's thigh frame. This frame is not at index 0,
when the frame bodies are sorted in the order of the regex matching in the frame transformer.
"""
# Spawn things into stage
scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False)
scene_cfg.frame_transformer = FrameTransformerCfg(
prim_path="{ENV_REGEX_NS}/Robot/LF_THIGH",
target_frames=[
FrameTransformerCfg.FrameCfg(
name="LF_FOOT_USER",
prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK",
offset=OffsetCfg(
pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)),
rot=quat_from_euler_rpy(0, 0, -math.pi / 2),
),
),
FrameTransformerCfg.FrameCfg(
name="RF_FOOT_USER",
prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK",
offset=OffsetCfg(
pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)),
rot=quat_from_euler_rpy(0, 0, math.pi / 2),
),
),
],
)
scene = InteractiveScene(scene_cfg)
# Play the simulator
self.sim.reset()
# Acquire the index of ground truth bodies
source_frame_index = scene.articulations["robot"].find_bodies("LF_THIGH")[0][0]
feet_indices, feet_names = scene.articulations["robot"].find_bodies(["LF_FOOT", "RF_FOOT"])
# Check that the names are parsed in the same order
user_feet_names = [f"{name}_USER" for name in feet_names]
self.assertListEqual(scene.sensors["frame_transformer"].data.target_frame_names, user_feet_names)
# default joint targets
default_actions = scene.articulations["robot"].data.default_joint_pos.clone()
# Define simulation stepping
sim_dt = self.sim.get_physics_dt()
# Simulate physics
for count in range(100):
# reset
if count % 25 == 0:
# reset root state
root_state = scene.articulations["robot"].data.default_root_state.clone()
root_state[:, :3] += scene.env_origins
joint_pos = scene.articulations["robot"].data.default_joint_pos
joint_vel = scene.articulations["robot"].data.default_joint_vel
# -- set root state
# -- robot
scene.articulations["robot"].write_root_state_to_sim(root_state)
scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
# reset buffers
scene.reset()
# set joint targets
robot_actions = default_actions + 0.5 * torch.randn_like(default_actions)
scene.articulations["robot"].set_joint_position_target(robot_actions)
# write data to sim
scene.write_data_to_sim()
# perform step
self.sim.step()
# read data from sim
scene.update(sim_dt)
# check absolute frame transforms in world frame
# -- ground-truth
source_pose_w_gt = scene.articulations["robot"].data.body_state_w[:, source_frame_index, :7]
feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices]
feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices]
# -- frame transformer
source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w
source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w
feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w
feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w
# check if they are the same
torch.testing.assert_close(source_pose_w_gt[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(source_pose_w_gt[:, 3:], source_quat_w_tf, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, rtol=1e-3, atol=1e-3)
# check if the relative transforms are the same
feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source
feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source
for index in range(len(feet_indices)):
# ground-truth
foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms(
source_pose_w_gt[:, :3], source_pose_w_gt[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index]
)
# check if they are the same
torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
run_tests()
| 13,916 |
Python
| 44.185065 | 119 | 0.577537 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/test_contact_sensor.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Tests to verify contact sensor functionality on rigid object prims."""
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
HEADLESS = True
# launch omniverse app
app_launcher = AppLauncher(headless=HEADLESS)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import unittest
from dataclasses import MISSING
from enum import Enum
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import RigidObject, RigidObjectCfg
from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.lab.sensors import ContactSensor, ContactSensorCfg
from omni.isaac.lab.sim import build_simulation_context
from omni.isaac.lab.terrains import HfRandomUniformTerrainCfg, TerrainGeneratorCfg, TerrainImporterCfg
from omni.isaac.lab.utils import configclass
##
# Custom helper classes.
##
class ContactTestMode(Enum):
"""Enum to declare the type of contact sensor test to execute."""
IN_CONTACT = 0
"""Enum to test the condition where the test object is in contact with the ground plane."""
NON_CONTACT = 1
"""Enum to test the condition where the test object is not in contact with the ground plane (air time)."""
@configclass
class TestContactSensorRigidObjectCfg(RigidObjectCfg):
"""Configuration for rigid objects used for the contact sensor test.
This contains the expected values in the configuration to simplify test fixtures.
"""
contact_pose: torch.Tensor = MISSING
"""6D pose of the rigid object under test when it is in contact with the ground surface."""
non_contact_pose: torch.Tensor = MISSING
"""6D pose of the rigid object under test when it is not in contact."""
@configclass
class ContactSensorSceneCfg(InteractiveSceneCfg):
"""Configuration of the scene used by the contact sensor test."""
terrain: TerrainImporterCfg = MISSING
"""Terrain configuration within the scene."""
shape: TestContactSensorRigidObjectCfg = MISSING
"""RigidObject contact prim configuration."""
contact_sensor: ContactSensorCfg = MISSING
"""Contact sensor configuration."""
shape_2: TestContactSensorRigidObjectCfg = None
"""RigidObject contact prim configuration. Defaults to None, i.e. not included in the scene.
This is a second prim used for testing contact filtering.
"""
contact_sensor_2: ContactSensorCfg = None
"""Contact sensor configuration. Defaults to None, i.e. not included in the scene.
This is a second contact sensor used for testing contact filtering.
"""
##
# Scene entity configurations.
##
CUBE_CFG = TestContactSensorRigidObjectCfg(
prim_path="/World/Objects/Cube",
spawn=sim_utils.CuboidCfg(
size=(0.5, 0.5, 0.5),
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
),
collision_props=sim_utils.CollisionPropertiesCfg(
collision_enabled=True,
),
activate_contact_sensors=True,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.6, 0.4)),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(0, -1.0, 1.0)),
contact_pose=torch.tensor([0, -1.0, 0, 1, 0, 0, 0]),
non_contact_pose=torch.tensor([0, -1.0, 1.0, 1, 0, 0, 0]),
)
"""Configuration of the cube prim."""
SPHERE_CFG = TestContactSensorRigidObjectCfg(
prim_path="/World/Objects/Sphere",
spawn=sim_utils.SphereCfg(
radius=0.25,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
),
collision_props=sim_utils.CollisionPropertiesCfg(
collision_enabled=True,
),
activate_contact_sensors=True,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.4, 0.6)),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(0, 1.0, 1.0)),
contact_pose=torch.tensor([0, 1.0, 0.0, 1, 0, 0, 0]),
non_contact_pose=torch.tensor([0, 1.0, 1.0, 1, 0, 0, 0]),
)
"""Configuration of the sphere prim."""
CYLINDER_CFG = TestContactSensorRigidObjectCfg(
prim_path="/World/Objects/Cylinder",
spawn=sim_utils.CylinderCfg(
radius=0.5,
height=0.01,
axis="Y",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
),
collision_props=sim_utils.CollisionPropertiesCfg(
collision_enabled=True,
),
activate_contact_sensors=True,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.6, 0.4, 0.4)),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(0, 0.0, 1.0)),
contact_pose=torch.tensor([0, 0, 0.0, 1, 0, 0, 0]),
non_contact_pose=torch.tensor([0, 0, 1.0, 1, 0, 0, 0]),
)
"""Configuration of the cylinder prim."""
CAPSULE_CFG = TestContactSensorRigidObjectCfg(
prim_path="/World/Objects/Capsule",
spawn=sim_utils.CapsuleCfg(
radius=0.25,
height=0.5,
axis="Z",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
),
collision_props=sim_utils.CollisionPropertiesCfg(
collision_enabled=True,
),
activate_contact_sensors=True,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.2, 0.4, 0.4)),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(1.0, 0.0, 1.5)),
contact_pose=torch.tensor([1.0, 0.0, 0.0, 1, 0, 0, 0]),
non_contact_pose=torch.tensor([1.0, 0.0, 1.5, 1, 0, 0, 0]),
)
"""Configuration of the capsule prim."""
CONE_CFG = TestContactSensorRigidObjectCfg(
prim_path="/World/Objects/Cone",
spawn=sim_utils.ConeCfg(
radius=0.5,
height=0.5,
axis="Z",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
),
collision_props=sim_utils.CollisionPropertiesCfg(
collision_enabled=True,
),
activate_contact_sensors=True,
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.2, 0.4)),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0, 0.0, 1.0)),
contact_pose=torch.tensor([-1.0, 0.0, 0.0, 1, 0, 0, 0]),
non_contact_pose=torch.tensor([-1.0, 0.0, 1.0, 1, 0, 0, 0]),
)
"""Configuration of the cone prim."""
FLAT_TERRAIN_CFG = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane")
"""Configuration of the flat ground plane."""
COBBLESTONE_TERRAIN_CFG = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="generator",
terrain_generator=TerrainGeneratorCfg(
seed=0,
size=(3.0, 3.0),
border_width=0.0,
num_rows=1,
num_cols=1,
sub_terrains={
"random_rough": HfRandomUniformTerrainCfg(
proportion=1.0, noise_range=(0.0, 0.05), noise_step=0.01, border_width=0.25
),
},
),
)
"""Configuration of the generated mesh terrain."""
class TestContactSensor(unittest.TestCase):
"""Unittest class for testing the contact sensor.
This class includes test cases for the available rigid object primitives, and tests that
the contact sensor reports correct results for various contact durations, terrain types, and
evaluation devices.
"""
@classmethod
def setUpClass(cls):
"""Contact sensor test suite init."""
cls.sim_dt = 0.0025
cls.durations = [cls.sim_dt, cls.sim_dt * 2, cls.sim_dt * 32, cls.sim_dt * 128]
cls.terrains = [FLAT_TERRAIN_CFG, COBBLESTONE_TERRAIN_CFG]
cls.devices = ["cuda:0", "cpu"]
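# note: every duration above is an integer multiple of `sim_dt`, so the expected
# contact/air times in the tests below are exact up to a single simulation step.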
def test_cube_contact_time(self):
"""Checks contact sensor values for contact time and air time for a cube collision primitive."""
self._run_contact_sensor_test(shape_cfg=CUBE_CFG)
def test_sphere_contact_time(self):
"""Checks contact sensor values for contact time and air time for a sphere collision primitive."""
self._run_contact_sensor_test(shape_cfg=SPHERE_CFG)
def test_cube_stack_contact_filtering(self):
"""Checks contact sensor reporting for filtering stacked cube prims."""
for device in self.devices:
for num_envs in [1, 6, 24]:
with self.subTest(device=device, num_envs=num_envs):
with build_simulation_context(device=device, dt=self.sim_dt, add_lighting=True) as sim:
# Instance new scene for the current terrain and contact prim.
scene_cfg = ContactSensorSceneCfg(num_envs=num_envs, env_spacing=1.0, lazy_sensor_update=False)
scene_cfg.terrain = FLAT_TERRAIN_CFG.replace(prim_path="/World/ground")
# -- cube 1
scene_cfg.shape = CUBE_CFG.replace(prim_path="{ENV_REGEX_NS}/Cube_1")
scene_cfg.shape.init_state.pos = (0, -1.0, 1.0)
# -- cube 2 (on top of cube 1)
scene_cfg.shape_2 = CUBE_CFG.replace(prim_path="{ENV_REGEX_NS}/Cube_2")
scene_cfg.shape_2.init_state.pos = (0, -1.0, 1.525)
# -- contact sensor 1
scene_cfg.contact_sensor = ContactSensorCfg(
prim_path="{ENV_REGEX_NS}/Cube_1",
track_pose=True,
debug_vis=False,
update_period=0.0,
filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube_2"],
)
# -- contact sensor 2
scene_cfg.contact_sensor_2 = ContactSensorCfg(
prim_path="{ENV_REGEX_NS}/Cube_2",
track_pose=True,
debug_vis=False,
update_period=0.0,
filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube_1"],
)
scene = InteractiveScene(scene_cfg)
# Set variables internally for reference
self.sim = sim
self.scene = scene
# Play the simulation
self.sim.reset()
# Extract from scene for type hinting
contact_sensor: ContactSensor = self.scene["contact_sensor"]
contact_sensor_2: ContactSensor = self.scene["contact_sensor_2"]
# Check buffers have the right size
self.assertEqual(contact_sensor.contact_physx_view.filter_count, 1)
self.assertEqual(contact_sensor_2.contact_physx_view.filter_count, 1)
# Reset the contact sensors
self.scene.reset()
# Let the scene come to a rest
for _ in range(20):
self._perform_sim_step()
# Check values for cube 2
torch.testing.assert_close(
contact_sensor_2.data.force_matrix_w[:, :, 0], contact_sensor_2.data.net_forces_w
)
torch.testing.assert_close(
contact_sensor_2.data.force_matrix_w[:, :, 0], contact_sensor.data.force_matrix_w[:, :, 0]
)
"""
Internal helpers.
"""
def _run_contact_sensor_test(self, shape_cfg: TestContactSensorRigidObjectCfg):
"""Runs a rigid body test for a given contact primitive configuration.
This method iterates through each device and terrain combination in the simulation environment,
running tests for contact sensors.
Args:
shape_cfg: The configuration parameters for the shape to be tested.
"""
for device in self.devices:
for terrain in self.terrains:
with self.subTest(device=device, terrain=terrain):
with build_simulation_context(device=device, dt=self.sim_dt, add_lighting=True) as sim:
# Instance new scene for the current terrain and contact prim.
scene_cfg = ContactSensorSceneCfg(num_envs=1, env_spacing=1.0, lazy_sensor_update=False)
scene_cfg.terrain = terrain
scene_cfg.shape = shape_cfg
scene_cfg.contact_sensor = ContactSensorCfg(
prim_path=shape_cfg.prim_path,
track_pose=True,
debug_vis=False,
update_period=0.0,
track_air_time=True,
history_length=3,
)
scene = InteractiveScene(scene_cfg)
# Set variables internally for reference
self.sim = sim
self.scene = scene
# Play the simulation
self.sim.reset()
# Run contact time and air time tests.
self._test_sensor_contact(
shape=self.scene["shape"],
sensor=self.scene["contact_sensor"],
mode=ContactTestMode.IN_CONTACT,
)
self._test_sensor_contact(
shape=self.scene["shape"],
sensor=self.scene["contact_sensor"],
mode=ContactTestMode.NON_CONTACT,
)
def _test_sensor_contact(self, shape: RigidObject, sensor: ContactSensor, mode: ContactTestMode):
"""Test for the contact sensor.
This test sets the contact prim to a pose either in contact or out of contact with the ground plane for
a known duration. Once the contact duration has elapsed, the data stored inside the contact sensor
associated with the contact prim is checked against the expected values.
This process is repeated for all elements in :attr:`TestContactSensor.durations`, where each successive
contact timing test is punctuated by setting the contact prim to the complement of the desired contact mode
for 1 sim time-step.
Args:
shape: The contact prim used for the contact sensor test.
sensor: The sensor reporting data to be verified by the contact sensor test.
mode: The contact test mode: either contact with ground plane or air time.
"""
# reset the test state
sensor.reset()
expected_last_test_contact_time = 0
expected_last_reset_contact_time = 0
# set poses for shape for a given contact sensor test mode.
# desired contact mode to set for a given duration.
test_pose = None
# complement of the desired contact mode used to reset the contact sensor.
reset_pose = None
if mode == ContactTestMode.IN_CONTACT:
test_pose = shape.cfg.contact_pose
reset_pose = shape.cfg.non_contact_pose
elif mode == ContactTestMode.NON_CONTACT:
test_pose = shape.cfg.non_contact_pose
reset_pose = shape.cfg.contact_pose
else:
raise ValueError("Received incompatible contact sensor test mode")
for idx in range(len(self.durations)):
current_test_time = 0
duration = self.durations[idx]
while current_test_time < duration:
# set object states to contact the ground plane
shape.write_root_pose_to_sim(root_pose=test_pose)
# perform simulation step
self._perform_sim_step()
# increment contact time
current_test_time += self.sim_dt
# set last contact time to the previous desired contact duration plus the extra dt allowance.
expected_last_test_contact_time = self.durations[idx - 1] + self.sim_dt if idx > 0 else 0
# Check the data inside the contact sensor
if mode == ContactTestMode.IN_CONTACT:
self._check_prim_contact_state_times(
sensor=sensor,
expected_air_time=0.0,
expected_contact_time=self.durations[idx],
expected_last_contact_time=expected_last_test_contact_time,
expected_last_air_time=expected_last_reset_contact_time,
dt=duration + self.sim_dt,
)
elif mode == ContactTestMode.NON_CONTACT:
self._check_prim_contact_state_times(
sensor=sensor,
expected_air_time=self.durations[idx],
expected_contact_time=0.0,
expected_last_contact_time=expected_last_reset_contact_time,
expected_last_air_time=expected_last_test_contact_time,
dt=duration + self.sim_dt,
)
# switch the contact mode for 1 dt step before the next contact test begins.
shape.write_root_pose_to_sim(root_pose=reset_pose)
# perform simulation step
self._perform_sim_step()
# set the last air time to 2 sim_dt steps, because last_air_time and last_contact_time
# add an additional sim_dt to the total time spent in the previous contact mode to account
# for uncertainty in when the contact switch happened within a dt step.
expected_last_reset_contact_time = 2 * self.sim_dt
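# e.g. with this suite's sim_dt of 0.0025 s, a single reset step is expected to be
# reported as a last air/contact time of roughly 2 * 0.0025 = 0.005 s.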
def _check_prim_contact_state_times(
self,
sensor: ContactSensor,
expected_air_time: float,
expected_contact_time: float,
expected_last_air_time: float,
expected_last_contact_time: float,
dt: float,
) -> None:
"""Checks contact sensor data matches expected values.
Args:
sensor: Instance of ContactSensor containing data to be tested.
expected_air_time: Air time ground truth.
expected_contact_time: Contact time ground truth.
expected_last_air_time: Last air time ground truth.
expected_last_contact_time: Last contact time ground truth.
dt: Time since previous contact mode switch. If the contact prim left contact 0.1 seconds ago,
dt should be 0.1 + simulation dt seconds.
"""
# store current state of the contact prim
in_air = False
in_contact = False
if expected_air_time > 0.0:
in_air = True
if expected_contact_time > 0.0:
in_contact = True
measured_contact_time = sensor.data.current_contact_time
measured_air_time = sensor.data.current_air_time
measured_last_contact_time = sensor.data.last_contact_time
measured_last_air_time = sensor.data.last_air_time
# check current contact state
self.assertAlmostEqual(measured_contact_time.item(), expected_contact_time, places=2)
self.assertAlmostEqual(measured_air_time.item(), expected_air_time, places=2)
# check last contact state
self.assertAlmostEqual(measured_last_contact_time.item(), expected_last_contact_time, places=2)
self.assertAlmostEqual(measured_last_air_time.item(), expected_last_air_time, places=2)
# check current contact mode
self.assertEqual(sensor.compute_first_contact(dt=dt).item(), in_contact)
self.assertEqual(sensor.compute_first_air(dt=dt).item(), in_air)
def _perform_sim_step(self) -> None:
"""Updates sensors and steps the contact sensor test scene."""
# write data to simulation
self.scene.write_data_to_sim()
# simulate
self.sim.step(render=not HEADLESS)
# update buffers at sim dt
self.scene.update(dt=self.sim_dt)
if __name__ == "__main__":
run_tests()
| 20,156 |
Python
| 41.257862 | 119 | 0.59749 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/test_tiled_camera.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True, enable_cameras=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import copy
import numpy as np
import random
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.replicator.core as rep
from omni.isaac.core.prims import GeometryPrim, RigidPrim
from pxr import Gf, UsdGeom
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.sensors.camera import TiledCamera, TiledCameraCfg
from omni.isaac.lab.utils.timer import Timer
class TestTiledCamera(unittest.TestCase):
"""Test for USD tiled Camera sensor."""
def setUp(self):
"""Create a blank new stage for each test."""
self.camera_cfg = TiledCameraCfg(
height=128,
width=256,
offset=TiledCameraCfg.OffsetCfg(pos=(0.0, 0.0, 4.0), rot=(0.0, 0.0, 1.0, 0.0), convention="ros"),
prim_path="/World/Camera",
update_period=0,
data_types=["rgb", "depth"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
)
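# note: with this configuration the tiled camera is expected to output batched tensors
# of shape (num_cameras, 128, 256, 3) for "rgb" and (num_cameras, 128, 256, 1) for
# "depth", which is what the shape assertions below check.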
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.01
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=self.dt)
self.sim: sim_utils.SimulationContext = sim_utils.SimulationContext(sim_cfg)
# populate scene
self._populate_scene()
# load stage
stage_utils.update_stage()
def tearDown(self):
"""Stops simulator after each test."""
# close all the opened viewport from before.
rep.vp_manager.destroy_hydra_textures("Replicator")
# stop simulation
# note: cannot use self.sim.stop() since it performs one extra render step after stopping, which is undesired here
self.sim._timeline.stop()
# clear the stage
self.sim.clear_all_callbacks()
self.sim.clear_instance()
"""
Tests
"""
def test_single_camera_init(self):
"""Test single camera initialization."""
# Create camera
camera = TiledCamera(self.camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[0].GetPath().pathString, self.camera_cfg.prim_path)
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Check that the buffers exist and have the correct shapes
self.assertEqual(camera.data.pos_w.shape, (1, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (1, 4))
self.assertEqual(camera.data.quat_w_world.shape, (1, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (1, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (1, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for im_type, im_data in camera.data.output.to_dict().items():
if im_type == "rgb":
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width, 3))
else:
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width, 1))
self.assertGreater(im_data.mean().item(), 0.0)
del camera
def test_multi_camera_init(self):
"""Test multi-camera initialization."""
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
# Create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.prim_path = "/World/Origin_.*/CameraSensor"
camera = TiledCamera(camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[1].GetPath().pathString, "/World/Origin_01/CameraSensor")
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Check that the buffers exist and have the correct shapes
self.assertEqual(camera.data.pos_w.shape, (2, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (2, 4))
self.assertEqual(camera.data.quat_w_world.shape, (2, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (2, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (2, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for im_type, im_data in camera.data.output.to_dict().items():
if im_type == "rgb":
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 3))
else:
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 1))
self.assertGreater(im_data[0].mean().item(), 0.0)
self.assertGreater(im_data[1].mean().item(), 0.0)
del camera
def test_rgb_only_camera(self):
"""Test initialization with only RGB."""
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
# Create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.data_types = ["rgb"]
camera_cfg.prim_path = "/World/Origin_.*/CameraSensor"
camera = TiledCamera(camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[1].GetPath().pathString, "/World/Origin_01/CameraSensor")
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
self.assertListEqual(list(camera.data.output.keys()), ["rgb"])
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Check that the buffers exist and have the correct shapes
self.assertEqual(camera.data.pos_w.shape, (2, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (2, 4))
self.assertEqual(camera.data.quat_w_world.shape, (2, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (2, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (2, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for _, im_data in camera.data.output.to_dict().items():
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 3))
self.assertGreater(im_data[0].mean().item(), 0.0)
self.assertGreater(im_data[1].mean().item(), 0.0)
del camera
def test_depth_only_camera(self):
"""Test initialization with only depth."""
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
# Create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.data_types = ["depth"]
camera_cfg.prim_path = "/World/Origin_.*/CameraSensor"
camera = TiledCamera(camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[1].GetPath().pathString, "/World/Origin_01/CameraSensor")
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
self.assertListEqual(list(camera.data.output.keys()), ["depth"])
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Check that the buffers exist and have the correct shapes
self.assertEqual(camera.data.pos_w.shape, (2, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (2, 4))
self.assertEqual(camera.data.quat_w_world.shape, (2, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (2, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (2, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for _, im_data in camera.data.output.to_dict().items():
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 1))
self.assertGreater(im_data[0].mean().item(), 0.0)
self.assertGreater(im_data[1].mean().item(), 0.0)
del camera
def test_throughput(self):
"""Test tiled camera throughput."""
# create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.height = 480
camera_cfg.width = 640
camera = TiledCamera(camera_cfg)
# Play simulator
self.sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(5):
# perform rendering
self.sim.step()
# update camera
with Timer(f"Time taken for updating camera with shape {camera.image_shape}"):
camera.update(self.dt)
# Check image data
for im_type, im_data in camera.data.output.to_dict().items():
if im_type == "rgb":
self.assertEqual(im_data.shape, (1, camera_cfg.height, camera_cfg.width, 3))
else:
self.assertEqual(im_data.shape, (1, camera_cfg.height, camera_cfg.width, 1))
self.assertGreater(im_data.mean().item(), 0.0)
del camera
"""
Helper functions.
"""
@staticmethod
def _populate_scene():
"""Add prims to the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.SphereLightCfg()
cfg.func("/World/Light/GreySphere", cfg, translation=(4.5, 3.5, 10.0))
cfg.func("/World/Light/WhiteSphere", cfg, translation=(-4.5, 3.5, 10.0))
# Random objects
random.seed(0)
for i in range(10):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
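# note: this sampling is presumed to keep objects within roughly x, y in [-0.075, 1.425)
# and z in [0.5, 1.0), i.e. above the ground plane and inside the camera's view.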
# create prim
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = prim_utils.create_prim(
f"/World/Objects/Obj_{i:02d}",
prim_type,
translation=position,
scale=(0.25, 0.25, 0.25),
semantic_label=prim_type,
)
# cast to geom prim
geom_prim = getattr(UsdGeom, prim_type)(prim)
# set random color
color = Gf.Vec3f(random.random(), random.random(), random.random())
geom_prim.CreateDisplayColorAttr()
geom_prim.GetDisplayColorAttr().Set([color])
# add rigid properties
GeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True)
RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0)
if __name__ == "__main__":
run_tests()
| 14,127 |
Python
| 40.069767 | 116 | 0.604799 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/test_ray_caster_camera.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True, enable_cameras=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import copy
import numpy as np
import os
import torch
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.replicator.core as rep
from pxr import Gf
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.sensors.camera import Camera, CameraCfg
from omni.isaac.lab.sensors.ray_caster import RayCasterCamera, RayCasterCameraCfg, patterns
from omni.isaac.lab.sim import PinholeCameraCfg
from omni.isaac.lab.terrains.trimesh.utils import make_plane
from omni.isaac.lab.terrains.utils import create_prim_from_mesh
from omni.isaac.lab.utils import convert_dict_to_backend
from omni.isaac.lab.utils.timer import Timer
# sample camera poses
POSITION = [2.5, 2.5, 2.5]
QUAT_ROS = [-0.17591989, 0.33985114, 0.82047325, -0.42470819]
QUAT_OPENGL = [0.33985113, 0.17591988, 0.42470818, 0.82047324]
QUAT_WORLD = [-0.3647052, -0.27984815, -0.1159169, 0.88047623]
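# note: these three (w, x, y, z) quaternions are presumed to encode the same physical
# camera orientation expressed in the ROS, OpenGL, and world conventions, respectively;
# the offset tests below rely on that equivalence.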
class TestWarpCamera(unittest.TestCase):
"""Test for isaaclab camera sensor"""
"""
Test Setup and Teardown
"""
def setUp(self):
"""Create a blank new stage for each test."""
camera_pattern_cfg = patterns.PinholeCameraPatternCfg(
focal_length=24.0,
horizontal_aperture=20.955,
height=480,
width=640,
)
self.camera_cfg = RayCasterCameraCfg(
prim_path="/World/Camera",
mesh_prim_paths=["/World/defaultGroundPlane"],
update_period=0,
offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"),
debug_vis=False,
pattern_cfg=camera_pattern_cfg,
data_types=[
"distance_to_image_plane",
],
)
# Create a new stage
stage_utils.create_new_stage()
# create an Xform prim because placing the camera directly under /World is not supported
prim_utils.create_prim("/World/Camera", "Xform")
# Simulation time-step
self.dt = 0.01
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=self.dt)
self.sim: sim_utils.SimulationContext = sim_utils.SimulationContext(sim_cfg)
# Ground-plane
mesh = make_plane(size=(2e1, 2e1), height=0.0, center_zero=True)
create_prim_from_mesh("/World/defaultGroundPlane", mesh)
# load stage
stage_utils.update_stage()
def tearDown(self):
"""Stops simulator after each test."""
# close all the opened viewport from before.
rep.vp_manager.destroy_hydra_textures("Replicator")
# stop simulation
# note: cannot use self.sim.stop() since it performs one extra render step after stopping, which is undesired here
self.sim._timeline.stop()
# clear the stage
self.sim.clear_all_callbacks()
self.sim.clear_instance()
"""
Tests
"""
def test_camera_init(self):
"""Test camera initialization."""
# Create camera
camera = RayCasterCamera(cfg=self.camera_cfg)
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Check that the buffers exist and have the correct shapes
self.assertEqual(camera.data.pos_w.shape, (1, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (1, 4))
self.assertEqual(camera.data.quat_w_world.shape, (1, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (1, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (1, 3, 3))
self.assertEqual(
camera.data.image_shape, (self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width)
)
self.assertEqual(camera.data.info, [{self.camera_cfg.data_types[0]: None}])
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for im_data in camera.data.output.to_dict().values():
self.assertEqual(
im_data.shape, (1, self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width)
)
def test_camera_resolution(self):
"""Test camera resolution is correctly set."""
# Create camera
camera = RayCasterCamera(cfg=self.camera_cfg)
# Play sim
self.sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
camera.update(self.dt)
# access image data and compare shapes
for im_data in camera.data.output.to_dict().values():
self.assertTrue(im_data.shape == (1, self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width))
def test_camera_init_offset(self):
"""Test camera initialization with offset using different conventions."""
# define the same offset in all conventions
# -- ROS convention
cam_cfg_offset_ros = copy.deepcopy(self.camera_cfg)
cam_cfg_offset_ros.offset = RayCasterCameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_ROS,
convention="ros",
)
prim_utils.create_prim("/World/CameraOffsetRos", "Xform")
cam_cfg_offset_ros.prim_path = "/World/CameraOffsetRos"
camera_ros = RayCasterCamera(cam_cfg_offset_ros)
# -- OpenGL convention
cam_cfg_offset_opengl = copy.deepcopy(self.camera_cfg)
cam_cfg_offset_opengl.offset = RayCasterCameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_OPENGL,
convention="opengl",
)
prim_utils.create_prim("/World/CameraOffsetOpengl", "Xform")
cam_cfg_offset_opengl.prim_path = "/World/CameraOffsetOpengl"
camera_opengl = RayCasterCamera(cam_cfg_offset_opengl)
# -- World convention
cam_cfg_offset_world = copy.deepcopy(self.camera_cfg)
cam_cfg_offset_world.offset = RayCasterCameraCfg.OffsetCfg(
pos=POSITION,
rot=QUAT_WORLD,
convention="world",
)
prim_utils.create_prim("/World/CameraOffsetWorld", "Xform")
cam_cfg_offset_world.prim_path = "/World/CameraOffsetWorld"
camera_world = RayCasterCamera(cam_cfg_offset_world)
# play sim
self.sim.reset()
# update cameras
camera_world.update(self.dt)
camera_opengl.update(self.dt)
camera_ros.update(self.dt)
# check that all transforms are set correctly
np.testing.assert_allclose(camera_ros.data.pos_w[0].cpu().numpy(), cam_cfg_offset_ros.offset.pos)
np.testing.assert_allclose(camera_opengl.data.pos_w[0].cpu().numpy(), cam_cfg_offset_opengl.offset.pos)
np.testing.assert_allclose(camera_world.data.pos_w[0].cpu().numpy(), cam_cfg_offset_world.offset.pos)
# check if transform correctly set in output
np.testing.assert_allclose(camera_ros.data.pos_w[0].cpu().numpy(), cam_cfg_offset_ros.offset.pos, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_ros[0].cpu().numpy(), QUAT_ROS, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_opengl[0].cpu().numpy(), QUAT_OPENGL, rtol=1e-5)
np.testing.assert_allclose(camera_ros.data.quat_w_world[0].cpu().numpy(), QUAT_WORLD, rtol=1e-5)
def test_multi_camera_init(self):
"""Test multi-camera initialization."""
# create two cameras with different prim paths
# -- camera 1
cam_cfg_1 = copy.deepcopy(self.camera_cfg)
cam_cfg_1.prim_path = "/World/Camera_1"
prim_utils.create_prim("/World/Camera_1", "Xform")
# Create camera
cam_1 = RayCasterCamera(cam_cfg_1)
# -- camera 2
cam_cfg_2 = copy.deepcopy(self.camera_cfg)
cam_cfg_2.prim_path = "/World/Camera_2"
prim_utils.create_prim("/World/Camera_2", "Xform")
cam_2 = RayCasterCamera(cam_cfg_2)
# check that the loaded meshes are equal
self.assertTrue(cam_1.meshes == cam_2.meshes)
# play sim
self.sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
cam_1.update(self.dt)
cam_2.update(self.dt)
# check image data
for cam in [cam_1, cam_2]:
for im_data in cam.data.output.to_dict().values():
self.assertEqual(
im_data.shape, (1, self.camera_cfg.pattern_cfg.height, self.camera_cfg.pattern_cfg.width)
)
def test_camera_set_world_poses(self):
"""Test camera function to set specific world pose."""
camera = RayCasterCamera(self.camera_cfg)
# play sim
self.sim.reset()
# convert to torch tensors
position = torch.tensor([POSITION], dtype=torch.float32, device=camera.device)
orientation = torch.tensor([QUAT_WORLD], dtype=torch.float32, device=camera.device)
# set new pose
camera.set_world_poses(position.clone(), orientation.clone(), convention="world")
# check if transform correctly set in output
torch.testing.assert_close(camera.data.pos_w, position)
torch.testing.assert_close(camera.data.quat_w_world, orientation)
def test_camera_set_world_poses_from_view(self):
"""Test camera function to set specific world pose from view."""
camera = RayCasterCamera(self.camera_cfg)
# play sim
self.sim.reset()
# convert to torch tensors
eyes = torch.tensor([POSITION], dtype=torch.float32, device=camera.device)
targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device)
quat_ros_gt = torch.tensor([QUAT_ROS], dtype=torch.float32, device=camera.device)
# set new pose
camera.set_world_poses_from_view(eyes.clone(), targets.clone())
# check if transform correctly set in output
torch.testing.assert_close(camera.data.pos_w, eyes)
torch.testing.assert_close(camera.data.quat_w_ros, quat_ros_gt)
def test_intrinsic_matrix(self):
"""Checks that the camera's set and retrieve methods work for intrinsic matrix."""
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.pattern_cfg.height = 240
camera_cfg.pattern_cfg.width = 320
camera = RayCasterCamera(camera_cfg)
# play sim
self.sim.reset()
# Desired properties (obtained from realsense camera at 320x240 resolution)
rs_intrinsic_matrix = [229.31640625, 0.0, 164.810546875, 0.0, 229.826171875, 122.1650390625, 0.0, 0.0, 1.0]
rs_intrinsic_matrix = torch.tensor(rs_intrinsic_matrix, device=camera.device).reshape(3, 3).unsqueeze(0)
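# note: in row-major order this is [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], i.e.
# fx ~ 229.32, fy ~ 229.83, cx ~ 164.81, cy ~ 122.17 for this 320x240 profile.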
# Set matrix into simulator
camera.set_intrinsic_matrices(rs_intrinsic_matrix.clone())
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# Check that matrix is correct
# TODO: This is not correctly setting all values in the matrix since the
# vertical aperture and aperture offsets are not being set correctly.
# This is a bug in the simulator.
torch.testing.assert_close(rs_intrinsic_matrix[0, 0, 0], camera.data.intrinsic_matrices[0, 0, 0])
# torch.testing.assert_close(rs_intrinsic_matrix[0, 1, 1], camera.data.intrinsic_matrices[0, 1, 1])
def test_throughput(self):
"""Checks that the single camera gets created properly with a rig."""
# Create directory temp dir to dump the results
file_dir = os.path.dirname(os.path.realpath(__file__))
temp_dir = os.path.join(file_dir, "output", "camera", "throughput")
os.makedirs(temp_dir, exist_ok=True)
# Create replicator writer
rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3)
# create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.pattern_cfg.height = 480
camera_cfg.pattern_cfg.width = 640
camera = RayCasterCamera(camera_cfg)
# Play simulator
self.sim.reset()
# Set camera pose
eyes = torch.tensor([[2.5, 2.5, 2.5]], dtype=torch.float32, device=camera.device)
targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device)
camera.set_world_poses_from_view(eyes, targets)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(5):
# perform rendering
self.sim.step()
# update camera
with Timer(f"Time taken for updating camera with shape {camera.image_shape}"):
camera.update(self.dt)
# Save images
with Timer(f"Time taken for writing data with shape {camera.image_shape} "):
# Pack data back into replicator format to save them using its writer
if self.sim.get_version()[0] == 4:
rep_output = {"annotators": {}}
camera_data = convert_dict_to_backend(camera.data.output[0].to_dict(), backend="numpy")
for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()):
if info is not None:
rep_output["annotators"][key] = {"render_product": {"data": data, **info}}
else:
rep_output["annotators"][key] = {"render_product": {"data": data}}
else:
rep_output = dict()
camera_data = convert_dict_to_backend(camera.data.output[0].to_dict(), backend="numpy")
for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()):
if info is not None:
rep_output[key] = {"data": data, "info": info}
else:
rep_output[key] = data
# Save images
rep_output["trigger_outputs"] = {"on_time": camera.frame[0]}
rep_writer.write(rep_output)
print("----------------------------------------")
# Check image data
for im_data in camera.data.output.values():
self.assertEqual(im_data.shape, (1, camera_cfg.pattern_cfg.height, camera_cfg.pattern_cfg.width))
def test_output_equal_to_usdcamera(self):
"""Test that the ray caster camera output matches an equivalent USD camera."""
camera_pattern_cfg = patterns.PinholeCameraPatternCfg(
focal_length=24.0,
horizontal_aperture=20.955,
height=240,
width=320,
)
prim_utils.create_prim("/World/Camera_warp", "Xform")
camera_cfg_warp = RayCasterCameraCfg(
prim_path="/World/Camera",
mesh_prim_paths=["/World/defaultGroundPlane"],
update_period=0,
offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)),
debug_vis=False,
pattern_cfg=camera_pattern_cfg,
data_types=["distance_to_image_plane", "distance_to_camera", "normals"],
)
camera_warp = RayCasterCamera(camera_cfg_warp)
# create usd camera
camera_cfg_usd = CameraCfg(
height=240,
width=320,
prim_path="/World/Camera_usd",
update_period=0,
data_types=["distance_to_image_plane", "distance_to_camera", "normals"],
spawn=PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1e-4, 1.0e5)
),
)
camera_usd = Camera(camera_cfg_usd)
# play sim
self.sim.reset()
self.sim.play()
# convert to torch tensors
eyes = torch.tensor([[2.5, 2.5, 4.5]], dtype=torch.float32, device=camera_warp.device)
targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera_warp.device)
# set views
camera_warp.set_world_poses_from_view(eyes, targets)
camera_usd.set_world_poses_from_view(eyes, targets)
# perform steps
for _ in range(5):
self.sim.step()
# update camera
camera_usd.update(self.dt)
camera_warp.update(self.dt)
# check image data
torch.testing.assert_close(
camera_usd.data.output["distance_to_image_plane"],
camera_warp.data.output["distance_to_image_plane"],
rtol=5e-3,
atol=1e-4,
)
torch.testing.assert_close(
camera_usd.data.output["distance_to_camera"],
camera_warp.data.output["distance_to_camera"],
rtol=5e-3,
atol=1e-4,
)
torch.testing.assert_close(
camera_usd.data.output["normals"][..., :3],
camera_warp.data.output["normals"],
rtol=1e-5,
atol=1e-4,
)
def test_output_equal_to_usdcamera_offset(self):
"""Test that the ray caster camera output matches an equivalent USD camera when both use the same sensor offset."""
offset_rot = [-0.1251, 0.3617, 0.8731, -0.3020]
camera_pattern_cfg = patterns.PinholeCameraPatternCfg(
focal_length=24.0,
horizontal_aperture=20.955,
height=240,
width=320,
)
prim_utils.create_prim("/World/Camera_warp", "Xform")
camera_cfg_warp = RayCasterCameraCfg(
prim_path="/World/Camera",
mesh_prim_paths=["/World/defaultGroundPlane"],
update_period=0,
offset=RayCasterCameraCfg.OffsetCfg(pos=(2.5, 2.5, 4.0), rot=offset_rot, convention="ros"),
debug_vis=False,
pattern_cfg=camera_pattern_cfg,
data_types=["distance_to_image_plane", "distance_to_camera", "normals"],
)
camera_warp = RayCasterCamera(camera_cfg_warp)
# create usd camera
camera_cfg_usd = CameraCfg(
height=240,
width=320,
prim_path="/World/Camera_usd",
update_period=0,
data_types=["distance_to_image_plane", "distance_to_camera", "normals"],
spawn=PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1e-6, 1.0e5)
),
offset=CameraCfg.OffsetCfg(pos=(2.5, 2.5, 4.0), rot=offset_rot, convention="ros"),
)
camera_usd = Camera(camera_cfg_usd)
# play sim
self.sim.reset()
self.sim.play()
# perform steps
for _ in range(5):
self.sim.step()
# update camera
camera_usd.update(self.dt)
camera_warp.update(self.dt)
# check image data
torch.testing.assert_close(
camera_usd.data.output["distance_to_image_plane"],
camera_warp.data.output["distance_to_image_plane"],
rtol=5e-3,
atol=1e-4,
)
torch.testing.assert_close(
camera_usd.data.output["distance_to_camera"],
camera_warp.data.output["distance_to_camera"],
rtol=5e-3,
atol=1e-4,
)
torch.testing.assert_close(
camera_usd.data.output["normals"][..., :3],
camera_warp.data.output["normals"],
rtol=1e-5,
atol=1e-4,
)
def test_output_equal_to_usdcamera_prim_offset(self):
"""Test that the output of the ray caster camera is equal to the output of the usd camera when both are placed
under an XForm prim that is translated and rotated from the world origin
."""
offset_rot = [-0.1251, 0.3617, 0.8731, -0.3020]
# gf quat
gf_quatf = Gf.Quatd()
gf_quatf.SetReal(QUAT_OPENGL[0])
gf_quatf.SetImaginary(tuple(QUAT_OPENGL[1:]))
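# note: QUAT_OPENGL is stored as (w, x, y, z), so element 0 is the real part and the
# remaining three elements form the imaginary part of the Gf quaternion.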
camera_pattern_cfg = patterns.PinholeCameraPatternCfg(
focal_length=24.0,
horizontal_aperture=20.955,
height=240,
width=320,
)
prim_raycast_cam = prim_utils.create_prim("/World/Camera_warp", "Xform")
prim_raycast_cam.GetAttribute("xformOp:translate").Set(tuple(POSITION))
prim_raycast_cam.GetAttribute("xformOp:orient").Set(gf_quatf)
camera_cfg_warp = RayCasterCameraCfg(
prim_path="/World/Camera_warp",
mesh_prim_paths=["/World/defaultGroundPlane"],
update_period=0,
offset=RayCasterCameraCfg.OffsetCfg(pos=(0, 0, 2.0), rot=offset_rot, convention="ros"),
debug_vis=False,
pattern_cfg=camera_pattern_cfg,
data_types=["distance_to_image_plane", "distance_to_camera", "normals"],
)
camera_warp = RayCasterCamera(camera_cfg_warp)
# create usd camera
camera_cfg_usd = CameraCfg(
height=240,
width=320,
prim_path="/World/Camera_usd/camera",
update_period=0,
data_types=["distance_to_image_plane", "distance_to_camera", "normals"],
spawn=PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1e-6, 1.0e5)
),
offset=CameraCfg.OffsetCfg(pos=(0, 0, 2.0), rot=offset_rot, convention="ros"),
)
prim_usd = prim_utils.create_prim("/World/Camera_usd", "Xform")
prim_usd.GetAttribute("xformOp:translate").Set(tuple(POSITION))
prim_usd.GetAttribute("xformOp:orient").Set(gf_quatf)
camera_usd = Camera(camera_cfg_usd)
# play sim
self.sim.reset()
self.sim.play()
# perform steps
for _ in range(5):
self.sim.step()
# update camera
camera_usd.update(self.dt)
camera_warp.update(self.dt)
# check if pos and orientation are correct
torch.testing.assert_close(camera_warp.data.pos_w[0], camera_usd.data.pos_w[0])
torch.testing.assert_close(camera_warp.data.quat_w_ros[0], camera_usd.data.quat_w_ros[0])
# check image data
torch.testing.assert_close(
camera_usd.data.output["distance_to_image_plane"],
camera_warp.data.output["distance_to_image_plane"],
rtol=5e-3,
atol=1e-4,
)
torch.testing.assert_close(
camera_usd.data.output["distance_to_camera"],
camera_warp.data.output["distance_to_camera"],
rtol=5e-3,
atol=1e-4,
)
torch.testing.assert_close(
camera_usd.data.output["normals"][..., :3],
camera_warp.data.output["normals"],
rtol=1e-5,
atol=1e-4,
)
if __name__ == "__main__":
run_tests()
| 24,480 |
Python
| 39.733777 | 120 | 0.594363 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/envs/check_base_env_floating_cube.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates the base environment concept that combines a scene with an action,
observation and event manager for a floating cube.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the concept of an Environment.")
parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.envs.mdp as mdp
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import AssetBaseCfg, RigidObject, RigidObjectCfg
from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedEnvCfg
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.managers.action_manager import ActionTerm, ActionTermCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
##
# Scene definition
##
@configclass
class MySceneCfg(InteractiveSceneCfg):
"""Example scene configuration."""
# add terrain
terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane", debug_vis=False)
# add cube
cube: RigidObjectCfg = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/cube",
spawn=sim_utils.CuboidCfg(
size=(0.2, 0.2, 0.2),
rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0),
mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
physics_material=sim_utils.RigidBodyMaterialCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 5)),
)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
)
##
# Action Term
##
class CubeActionTerm(ActionTerm):
"""Simple action term that implements a PD controller to track a target position."""
_asset: RigidObject
"""The articulation asset on which the action term is applied."""
def __init__(self, cfg: ActionTermCfg, env: ManagerBasedEnv):
# call super constructor
super().__init__(cfg, env)
# create buffers
self._raw_actions = torch.zeros(env.num_envs, 3, device=self.device)
self._processed_actions = torch.zeros(env.num_envs, 3, device=self.device)
self._vel_command = torch.zeros(self.num_envs, 6, device=self.device)
# gains of controller
self.p_gain = 5.0
self.d_gain = 0.5
"""
Properties.
"""
@property
def action_dim(self) -> int:
return self._raw_actions.shape[1]
@property
def raw_actions(self) -> torch.Tensor:
# desired: (x, y, z)
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations
"""
def process_actions(self, actions: torch.Tensor):
# store the raw actions
self._raw_actions[:] = actions
        # no processing of actions; they are used directly as position targets
self._processed_actions[:] = self._raw_actions[:]
def apply_actions(self):
# implement a PD controller to track the target position
pos_error = self._processed_actions - (self._asset.data.root_pos_w - self._env.scene.env_origins)
vel_error = -self._asset.data.root_lin_vel_w
# set velocity targets
self._vel_command[:, :3] = self.p_gain * pos_error + self.d_gain * vel_error
self._asset.write_root_velocity_to_sim(self._vel_command)
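# Editor's note: a minimal sanity-check sketch of the PD law implemented in
# `CubeActionTerm.apply_actions` above, using dummy tensors instead of simulator
# state. The helper is illustrative only and is never called by this script.
def _pd_law_example():
    p_gain, d_gain = 5.0, 0.5
    target = torch.tensor([[1.0, 0.0, 0.0]])  # desired position (x, y, z)
    current = torch.zeros(1, 3)  # current root position
    lin_vel = torch.zeros(1, 3)  # current root linear velocity
    pos_error = target - current
    vel_error = -lin_vel
    vel_command = p_gain * pos_error + d_gain * vel_error
    # with zero velocity, the command is purely proportional: 5.0 * (1, 0, 0)
    assert torch.allclose(vel_command, torch.tensor([[5.0, 0.0, 0.0]]))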
@configclass
class CubeActionTermCfg(ActionTermCfg):
"""Configuration for the cube action term."""
class_type: type = CubeActionTerm
##
# Observation Term
##
def base_position(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
"""Root linear velocity in the asset's root frame."""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_pos_w - env.scene.env_origins
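# Editor's note: an illustrative second observation term following the same pattern
# (an assumption for exposition; it is not used by the environment below). Any function
# that takes the environment and returns a (num_envs, obs_dim) tensor can back an ObsTerm.
def base_lin_vel_example(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
    """Root linear velocity of the asset in the world frame."""
    asset: RigidObject = env.scene[asset_cfg.name]
    return asset.data.root_lin_vel_w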
##
# Environment settings
##
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
joint_pos = CubeActionTermCfg(asset_name="cube")
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
        # cube position
position = ObsTerm(func=base_position, params={"asset_cfg": SceneEntityCfg("cube")})
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
reset_base = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (-0.5, 0.5),
"y": (-0.5, 0.5),
"z": (-0.5, 0.5),
},
"asset_cfg": SceneEntityCfg("cube"),
},
)
##
# Environment configuration
##
@configclass
class CubeEnvCfg(ManagerBasedEnvCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5, replicate_physics=True)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
events: EventCfg = EventCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 2
# simulation settings
self.sim.dt = 0.01
self.sim.physics_material = self.scene.terrain.physics_material
def main():
"""Main function."""
# setup base environment
env = ManagerBasedEnv(cfg=CubeEnvCfg())
# setup target position commands
target_position = torch.rand(env.num_envs, 3, device=env.device) * 2
target_position[:, 2] += 2.0
# offset all targets so that they move to the world origin
target_position -= env.scene.env_origins
# simulate physics
count = 0
while simulation_app.is_running():
with torch.inference_mode():
# reset
if count % 300 == 0:
env.reset()
count = 0
# step env
obs, _ = env.step(target_position)
            # print the mean position error between the target and current positions
            error = torch.norm(obs["policy"] - target_position, dim=-1).mean().item()
print(f"[Step: {count:04d}]: Mean position error: {error:.4f}")
# update counter
count += 1
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 7,653 |
Python
| 27.666667 | 114 | 0.650856 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/envs/test_base_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# Can set this to False to see the GUI for debugging
HEADLESS = True
# launch omniverse app
app_launcher = AppLauncher(headless=HEADLESS)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import unittest
import omni.usd
from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.utils import configclass
@configclass
class EmptyActionsCfg:
"""Action specifications for the environment."""
pass
@configclass
class EmptySceneCfg(InteractiveSceneCfg):
"""Configuration for an empty scene."""
pass
def get_empty_base_env_cfg(device: str = "cuda:0", num_envs: int = 1, env_spacing: float = 1.0):
"""Generate base environment config based on device"""
@configclass
class EmptyEnvCfg(ManagerBasedEnvCfg):
"""Configuration for the empty test environment."""
# Scene settings
scene: EmptySceneCfg = EmptySceneCfg(num_envs=num_envs, env_spacing=env_spacing)
# Basic settings
actions: EmptyActionsCfg = EmptyActionsCfg()
def __post_init__(self):
"""Post initialization."""
# step settings
self.decimation = 4 # env step every 4 sim steps: 200Hz / 4 = 50Hz
# simulation settings
self.sim.dt = 0.005 # sim step every 5ms: 200Hz
# pass device down from test
self.sim.device = device
return EmptyEnvCfg()
class TestBaseEnv(unittest.TestCase):
"""Test for base env class"""
"""
Tests
"""
def test_initialization(self):
for device in ("cuda:0", "cpu"):
with self.subTest(device=device):
# create a new stage
omni.usd.get_context().new_stage()
# create environment
env = ManagerBasedEnv(cfg=get_empty_base_env_cfg(device=device))
# check size of action manager terms
self.assertEqual(env.action_manager.total_action_dim, 0)
self.assertEqual(len(env.action_manager.active_terms), 0)
self.assertEqual(len(env.action_manager.action_term_dim), 0)
# check size of observation manager terms
self.assertEqual(len(env.observation_manager.active_terms), 0)
self.assertEqual(len(env.observation_manager.group_obs_dim), 0)
self.assertEqual(len(env.observation_manager.group_obs_term_dim), 0)
self.assertEqual(len(env.observation_manager.group_obs_concatenate), 0)
                # create actions of the correct size: (num_envs, 0)
act = torch.randn_like(env.action_manager.action)
# step environment to verify setup
for _ in range(2):
obs, ext = env.step(action=act)
# close the environment
env.close()
if __name__ == "__main__":
run_tests()
| 3,291 |
Python
| 30.056603 | 96 | 0.635369 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/envs/test_null_command_term.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import unittest
from collections import namedtuple
from omni.isaac.lab.envs.mdp import NullCommandCfg
class TestNullCommandTerm(unittest.TestCase):
"""Test cases for null command generator."""
def setUp(self) -> None:
self.env = namedtuple("ManagerBasedRLEnv", ["num_envs", "dt", "device"])(20, 0.1, "cpu")
def test_str(self):
"""Test the string representation of the command manager."""
cfg = NullCommandCfg()
command_term = cfg.class_type(cfg, self.env)
# print the expected string
print()
print(command_term)
def test_compute(self):
"""Test the compute function. For null command generator, it does nothing."""
cfg = NullCommandCfg()
command_term = cfg.class_type(cfg, self.env)
# test the reset function
command_term.reset()
# test the compute function
command_term.compute(dt=self.env.dt)
# expect error
with self.assertRaises(RuntimeError):
command_term.command
if __name__ == "__main__":
run_tests()
| 1,431 |
Python
| 26.538461 | 96 | 0.65898 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/envs/check_base_env_anymal_locomotion.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates the environment concept that combines a scene with an action,
observation and event manager for a quadruped robot.
A locomotion policy is loaded and used to control the robot. This shows how to use the
environment with a policy.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the concept of an Environment.")
parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import omni.isaac.lab.envs.mdp as mdp
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedEnvCfg
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sensors import RayCasterCfg, patterns
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR, NVIDIA_NUCLEUS_DIR, check_file_path, read_file
from omni.isaac.lab.utils.noise import AdditiveUniformNoiseCfg as Unoise
##
# Pre-defined configs
##
from omni.isaac.lab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip
##
# Scene definition
##
@configclass
class MySceneCfg(InteractiveSceneCfg):
"""Example scene configuration."""
# add terrain
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
visual_material=sim_utils.MdlFileCfg(
mdl_path=f"{ISAACLAB_NUCLEUS_DIR}/Materials/TilesMarbleSpiderWhiteBrickBondHoned/TilesMarbleSpiderWhiteBrickBondHoned.mdl",
project_uvw=True,
texture_scale=(0.25, 0.25),
),
debug_vis=False,
)
# add robot
robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
# sensors
height_scanner = RayCasterCfg(
prim_path="{ENV_REGEX_NS}/Robot/base",
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=True,
mesh_prim_paths=["/World/ground"],
)
# lights
sky_light = AssetBaseCfg(
prim_path="/World/skyLight",
spawn=sim_utils.DomeLightCfg(
intensity=900.0,
texture_file=f"{NVIDIA_NUCLEUS_DIR}/Assets/Skies/Cloudy/kloofendal_48d_partly_cloudy_4k.hdr",
visible_in_primary_ray=False,
),
)
##
# MDP settings
##
def constant_commands(env: ManagerBasedEnv) -> torch.Tensor:
"""The generated command from the command generator."""
return torch.tensor([[1, 0, 0]], device=env.device).repeat(env.num_envs, 1)
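# Editor's note: a small shape check for `constant_commands` (a sketch; the dummy
# namespace stands in for the environment and is not IsaacLab API). It is not called
# by this script.
def _constant_commands_example():
    from types import SimpleNamespace

    dummy_env = SimpleNamespace(num_envs=4, device="cpu")
    commands = constant_commands(dummy_env)
    # one (vx, vy, wz) command per environment, all walking forward at 1 m/s
    assert commands.shape == (4, 3)
    assert bool(commands[:, 0].eq(1).all())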
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1))
base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2))
projected_gravity = ObsTerm(
func=mdp.projected_gravity,
noise=Unoise(n_min=-0.05, n_max=0.05),
)
velocity_commands = ObsTerm(func=constant_commands)
joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5))
actions = ObsTerm(func=mdp.last_action)
height_scan = ObsTerm(
func=mdp.height_scan,
params={"sensor_cfg": SceneEntityCfg("height_scanner")},
noise=Unoise(n_min=-0.1, n_max=0.1),
clip=(-1.0, 1.0),
)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class EventCfg:
"""Configuration for events."""
reset_base = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (-0.5, 0.5),
"y": (-0.5, 0.5),
"z": (-0.5, 0.5),
"roll": (-0.5, 0.5),
"pitch": (-0.5, 0.5),
"yaw": (-0.5, 0.5),
},
},
)
##
# Environment configuration
##
@configclass
class QuadrupedEnvCfg(ManagerBasedEnvCfg):
"""Configuration for the locomotion velocity-tracking environment."""
# Scene settings
scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5, replicate_physics=True)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
events: EventCfg = EventCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 4
self.episode_length_s = 20.0
# simulation settings
self.sim.dt = 0.005
# update sensor update periods
        # we tick the sensors at the environment step rate (decimation * physics dt)
if self.scene.height_scanner is not None:
self.scene.height_scanner.update_period = self.decimation * self.sim.dt
def main():
"""Main function."""
# setup base environment
env = ManagerBasedEnv(cfg=QuadrupedEnvCfg())
obs, _ = env.reset()
# load level policy
policy_path = ISAACLAB_NUCLEUS_DIR + "/Policies/ANYmal-C/HeightScan/policy.pt"
# check if policy file exists
if not check_file_path(policy_path):
raise FileNotFoundError(f"Policy file '{policy_path}' does not exist.")
file_bytes = read_file(policy_path)
# jit load the policy
locomotion_policy = torch.jit.load(file_bytes)
locomotion_policy.to(env.device)
locomotion_policy.eval()
# simulate physics
count = 0
while simulation_app.is_running():
with torch.inference_mode():
# reset
if count % 1000 == 0:
obs, _ = env.reset()
count = 0
print("[INFO]: Resetting robots state...")
# infer action
action = locomotion_policy(obs["policy"])
# step env
obs, _ = env.step(action)
# update counter
count += 1
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 7,913 |
Python
| 30.035294 | 135 | 0.644635 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/managers/test_observation_manager.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# needed to allow type-hinting of `torch.Tensor | None`
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import torch
import unittest
from collections import namedtuple
from omni.isaac.lab.managers import ManagerTermBase, ObservationGroupCfg, ObservationManager, ObservationTermCfg
from omni.isaac.lab.utils import configclass
def grilled_chicken(env):
return torch.ones(env.num_envs, 4, device=env.device)
def grilled_chicken_with_bbq(env, bbq: bool):
return bbq * torch.ones(env.num_envs, 1, device=env.device)
def grilled_chicken_with_curry(env, hot: bool):
return hot * 2 * torch.ones(env.num_envs, 1, device=env.device)
def grilled_chicken_with_yoghurt(env, hot: bool, bland: float):
return hot * bland * torch.ones(env.num_envs, 5, device=env.device)
def grilled_chicken_with_yoghurt_and_bbq(env, hot: bool, bland: float, bbq: bool = False):
return hot * bland * bbq * torch.ones(env.num_envs, 3, device=env.device)
class complex_function_class(ManagerTermBase):
def __init__(self, cfg: ObservationTermCfg, env: object):
self.cfg = cfg
self.env = env
# define some variables
self._time_passed = torch.zeros(env.num_envs, device=env.device)
def reset(self, env_ids: torch.Tensor | None = None):
if env_ids is None:
env_ids = slice(None)
self._time_passed[env_ids] = 0.0
def __call__(self, env: object, interval: float) -> torch.Tensor:
self._time_passed += interval
return self._time_passed.clone().unsqueeze(-1)
class non_callable_complex_function_class(ManagerTermBase):
def __init__(self, cfg: ObservationTermCfg, env: object):
self.cfg = cfg
self.env = env
# define some variables
self._cost = 2 * self.env.num_envs
def call_me(self, env: object) -> torch.Tensor:
return torch.ones(env.num_envs, 2, device=env.device) * self._cost
class MyDataClass:
def __init__(self, num_envs: int, device: str):
self.pos_w = torch.rand((num_envs, 3), device=device)
self.lin_vel_w = torch.rand((num_envs, 3), device=device)
def pos_w_data(env) -> torch.Tensor:
return env.data.pos_w
def lin_vel_w_data(env) -> torch.Tensor:
return env.data.lin_vel_w
class TestObservationManager(unittest.TestCase):
"""Test cases for various situations with observation manager."""
def setUp(self) -> None:
# set up the environment
self.num_envs = 20
self.device = "cuda:0"
# create dummy environment
self.env = namedtuple("ManagerBasedEnv", ["num_envs", "device", "data"])(
self.num_envs, self.device, MyDataClass(self.num_envs, self.device)
)
def test_str(self):
"""Test the string representation of the observation manager."""
@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
@configclass
class SampleGroupCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
term_1 = ObservationTermCfg(func="__main__:grilled_chicken", scale=10)
term_2 = ObservationTermCfg(func=grilled_chicken, scale=2)
term_3 = ObservationTermCfg(func=grilled_chicken_with_bbq, scale=5, params={"bbq": True})
term_4 = ObservationTermCfg(
func=grilled_chicken_with_yoghurt, scale=1.0, params={"hot": False, "bland": 2.0}
)
term_5 = ObservationTermCfg(
func=grilled_chicken_with_yoghurt_and_bbq, scale=1.0, params={"hot": False, "bland": 2.0}
)
policy: ObservationGroupCfg = SampleGroupCfg()
# create observation manager
cfg = MyObservationManagerCfg()
self.obs_man = ObservationManager(cfg, self.env)
self.assertEqual(len(self.obs_man.active_terms["policy"]), 5)
# print the expected string
print()
print(self.obs_man)
def test_config_equivalence(self):
"""Test the equivalence of observation manager created from different config types."""
# create from config class
@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
@configclass
class SampleGroupCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
your_term = ObservationTermCfg(func="__main__:grilled_chicken", scale=10)
his_term = ObservationTermCfg(func=grilled_chicken, scale=2)
my_term = ObservationTermCfg(func=grilled_chicken_with_bbq, scale=5, params={"bbq": True})
her_term = ObservationTermCfg(
func=grilled_chicken_with_yoghurt, scale=1.0, params={"hot": False, "bland": 2.0}
)
policy = SampleGroupCfg()
critic = SampleGroupCfg(concatenate_terms=False, her_term=None)
cfg = MyObservationManagerCfg()
obs_man_from_cfg = ObservationManager(cfg, self.env)
# create from config class
@configclass
class MyObservationManagerAnnotatedCfg:
"""Test config class for observation manager with annotations on terms."""
@configclass
class SampleGroupCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
your_term: ObservationTermCfg = ObservationTermCfg(func="__main__:grilled_chicken", scale=10)
his_term: ObservationTermCfg = ObservationTermCfg(func=grilled_chicken, scale=2)
my_term: ObservationTermCfg = ObservationTermCfg(
func=grilled_chicken_with_bbq, scale=5, params={"bbq": True}
)
her_term: ObservationTermCfg = ObservationTermCfg(
func=grilled_chicken_with_yoghurt, scale=1.0, params={"hot": False, "bland": 2.0}
)
policy: ObservationGroupCfg = SampleGroupCfg()
critic: ObservationGroupCfg = SampleGroupCfg(concatenate_terms=False, her_term=None)
cfg = MyObservationManagerAnnotatedCfg()
obs_man_from_annotated_cfg = ObservationManager(cfg, self.env)
# check equivalence
# parsed terms
self.assertEqual(obs_man_from_cfg.active_terms, obs_man_from_annotated_cfg.active_terms)
self.assertEqual(obs_man_from_cfg.group_obs_term_dim, obs_man_from_annotated_cfg.group_obs_term_dim)
self.assertEqual(obs_man_from_cfg.group_obs_dim, obs_man_from_annotated_cfg.group_obs_dim)
# parsed term configs
self.assertEqual(obs_man_from_cfg._group_obs_term_cfgs, obs_man_from_annotated_cfg._group_obs_term_cfgs)
self.assertEqual(obs_man_from_cfg._group_obs_concatenate, obs_man_from_annotated_cfg._group_obs_concatenate)
def test_config_terms(self):
"""Test the number of terms in the observation manager."""
@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
@configclass
class SampleGroupCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
term_1 = ObservationTermCfg(func=grilled_chicken, scale=10)
term_2 = ObservationTermCfg(func=grilled_chicken_with_curry, scale=0.0, params={"hot": False})
policy: ObservationGroupCfg = SampleGroupCfg()
critic: ObservationGroupCfg = SampleGroupCfg(term_2=None)
# create observation manager
cfg = MyObservationManagerCfg()
self.obs_man = ObservationManager(cfg, self.env)
self.assertEqual(len(self.obs_man.active_terms["policy"]), 2)
self.assertEqual(len(self.obs_man.active_terms["critic"]), 1)
def test_compute(self):
"""Test the observation computation."""
@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
@configclass
class PolicyCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
term_1 = ObservationTermCfg(func=grilled_chicken, scale=10)
term_2 = ObservationTermCfg(func=grilled_chicken_with_curry, scale=0.0, params={"hot": False})
term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0)
term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5)
@configclass
class CriticCfg(ObservationGroupCfg):
term_1 = ObservationTermCfg(func=pos_w_data, scale=2.0)
term_2 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5)
term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0)
term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5)
policy: ObservationGroupCfg = PolicyCfg()
critic: ObservationGroupCfg = CriticCfg()
# create observation manager
cfg = MyObservationManagerCfg()
self.obs_man = ObservationManager(cfg, self.env)
# compute observation using manager
observations = self.obs_man.compute()
# obtain the group observations
obs_policy: torch.Tensor = observations["policy"]
obs_critic: torch.Tensor = observations["critic"]
# check the observation shape
self.assertEqual((self.env.num_envs, 11), obs_policy.shape)
self.assertEqual((self.env.num_envs, 12), obs_critic.shape)
# make sure that the data are the same for same terms
# -- within group
torch.testing.assert_close(obs_critic[:, 0:3], obs_critic[:, 6:9])
torch.testing.assert_close(obs_critic[:, 3:6], obs_critic[:, 9:12])
# -- between groups
torch.testing.assert_close(obs_policy[:, 5:8], obs_critic[:, 0:3])
torch.testing.assert_close(obs_policy[:, 8:11], obs_critic[:, 3:6])
def test_invalid_observation_config(self):
"""Test the invalid observation config."""
@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
@configclass
class PolicyCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
term_1 = ObservationTermCfg(func=grilled_chicken_with_bbq, scale=0.1, params={"hot": False})
term_2 = ObservationTermCfg(func=grilled_chicken_with_yoghurt, scale=2.0, params={"hot": False})
policy: ObservationGroupCfg = PolicyCfg()
# create observation manager
cfg = MyObservationManagerCfg()
# check the invalid config
with self.assertRaises(ValueError):
self.obs_man = ObservationManager(cfg, self.env)
def test_callable_class_term(self):
"""Test the observation computation with callable class term."""
@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
@configclass
class PolicyCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
term_1 = ObservationTermCfg(func=grilled_chicken, scale=10)
term_2 = ObservationTermCfg(func=complex_function_class, scale=0.2, params={"interval": 0.5})
policy: ObservationGroupCfg = PolicyCfg()
# create observation manager
cfg = MyObservationManagerCfg()
self.obs_man = ObservationManager(cfg, self.env)
# compute observation using manager
observations = self.obs_man.compute()
# check the observation
self.assertEqual((self.env.num_envs, 5), observations["policy"].shape)
self.assertAlmostEqual(observations["policy"][0, -1].item(), 0.2 * 0.5)
# check memory in term
num_exec_count = 10
for _ in range(num_exec_count):
observations = self.obs_man.compute()
self.assertAlmostEqual(observations["policy"][0, -1].item(), 0.2 * 0.5 * (num_exec_count + 1))
# check reset works
self.obs_man.reset(env_ids=[0, 4, 9, 14, 19])
observations = self.obs_man.compute()
self.assertAlmostEqual(observations["policy"][0, -1].item(), 0.2 * 0.5)
self.assertAlmostEqual(observations["policy"][1, -1].item(), 0.2 * 0.5 * (num_exec_count + 2))
def test_non_callable_class_term(self):
"""Test the observation computation with non-callable class term."""
@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
@configclass
class PolicyCfg(ObservationGroupCfg):
"""Test config class for policy observation group."""
term_1 = ObservationTermCfg(func=grilled_chicken, scale=10)
term_2 = ObservationTermCfg(func=non_callable_complex_function_class, scale=0.2)
policy: ObservationGroupCfg = PolicyCfg()
# create observation manager config
cfg = MyObservationManagerCfg()
# create observation manager
with self.assertRaises(NotImplementedError):
self.obs_man = ObservationManager(cfg, self.env)
if __name__ == "__main__":
run_tests()
| 13,723 |
Python
| 38.77971 | 116 | 0.632223 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/managers/test_reward_manager.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import unittest
from collections import namedtuple
from omni.isaac.lab.managers import RewardManager, RewardTermCfg
from omni.isaac.lab.utils import configclass
def grilled_chicken(env):
return 1
def grilled_chicken_with_bbq(env, bbq: bool):
return 0
def grilled_chicken_with_curry(env, hot: bool):
return 0
def grilled_chicken_with_yoghurt(env, hot: bool, bland: float):
return 0
class TestRewardManager(unittest.TestCase):
"""Test cases for various situations with reward manager."""
def setUp(self) -> None:
self.env = namedtuple("ManagerBasedRLEnv", ["num_envs", "dt", "device"])(20, 0.1, "cpu")
def test_str(self):
"""Test the string representation of the reward manager."""
cfg = {
"term_1": RewardTermCfg(func=grilled_chicken, weight=10),
"term_2": RewardTermCfg(func=grilled_chicken_with_bbq, weight=5, params={"bbq": True}),
"term_3": RewardTermCfg(
func=grilled_chicken_with_yoghurt,
weight=1.0,
params={"hot": False, "bland": 2.0},
),
}
self.rew_man = RewardManager(cfg, self.env)
self.assertEqual(len(self.rew_man.active_terms), 3)
# print the expected string
print()
print(self.rew_man)
def test_config_equivalence(self):
"""Test the equivalence of reward manager created from different config types."""
# create from dictionary
cfg = {
"my_term": RewardTermCfg(func=grilled_chicken, weight=10),
"your_term": RewardTermCfg(func=grilled_chicken_with_bbq, weight=2.0, params={"bbq": True}),
"his_term": RewardTermCfg(
func=grilled_chicken_with_yoghurt,
weight=1.0,
params={"hot": False, "bland": 2.0},
),
}
rew_man_from_dict = RewardManager(cfg, self.env)
# create from config class
@configclass
class MyRewardManagerCfg:
"""Reward manager config with no type annotations."""
my_term = RewardTermCfg(func=grilled_chicken, weight=10.0)
your_term = RewardTermCfg(func=grilled_chicken_with_bbq, weight=2.0, params={"bbq": True})
his_term = RewardTermCfg(func=grilled_chicken_with_yoghurt, weight=1.0, params={"hot": False, "bland": 2.0})
cfg = MyRewardManagerCfg()
rew_man_from_cfg = RewardManager(cfg, self.env)
# create from config class
@configclass
class MyRewardManagerAnnotatedCfg:
"""Reward manager config with type annotations."""
my_term: RewardTermCfg = RewardTermCfg(func=grilled_chicken, weight=10.0)
your_term: RewardTermCfg = RewardTermCfg(func=grilled_chicken_with_bbq, weight=2.0, params={"bbq": True})
his_term: RewardTermCfg = RewardTermCfg(
func=grilled_chicken_with_yoghurt, weight=1.0, params={"hot": False, "bland": 2.0}
)
cfg = MyRewardManagerAnnotatedCfg()
rew_man_from_annotated_cfg = RewardManager(cfg, self.env)
# check equivalence
# parsed terms
self.assertEqual(rew_man_from_dict.active_terms, rew_man_from_annotated_cfg.active_terms)
self.assertEqual(rew_man_from_cfg.active_terms, rew_man_from_annotated_cfg.active_terms)
self.assertEqual(rew_man_from_dict.active_terms, rew_man_from_cfg.active_terms)
# parsed term configs
self.assertEqual(rew_man_from_dict._term_cfgs, rew_man_from_annotated_cfg._term_cfgs)
self.assertEqual(rew_man_from_cfg._term_cfgs, rew_man_from_annotated_cfg._term_cfgs)
self.assertEqual(rew_man_from_dict._term_cfgs, rew_man_from_cfg._term_cfgs)
def test_compute(self):
"""Test the computation of reward."""
cfg = {
"term_1": RewardTermCfg(func=grilled_chicken, weight=10),
"term_2": RewardTermCfg(func=grilled_chicken_with_curry, weight=0.0, params={"hot": False}),
}
self.rew_man = RewardManager(cfg, self.env)
# compute expected reward
expected_reward = cfg["term_1"].weight * self.env.dt
# compute reward using manager
rewards = self.rew_man.compute(dt=self.env.dt)
# check the reward for environment index 0
self.assertEqual(float(rewards[0]), expected_reward)
self.assertEqual(tuple(rewards.shape), (self.env.num_envs,))
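    # Editor's note (worked example, inferred from the assertions above rather than
    # from the RewardManager internals): each active term contributes
    #   weight * func(env) * dt
    # so term_1 yields 10 * 1 * 0.1 = 1.0 and term_2 yields 0.0 * 0 * 0.1 = 0.0,
    # giving a total reward of 1.0 per environment.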
def test_active_terms(self):
"""Test the correct reading of active terms."""
cfg = {
"term_1": RewardTermCfg(func=grilled_chicken, weight=10),
"term_2": RewardTermCfg(func=grilled_chicken_with_bbq, weight=5, params={"bbq": True}),
"term_3": RewardTermCfg(func=grilled_chicken_with_curry, weight=0.0, params={"hot": False}),
}
self.rew_man = RewardManager(cfg, self.env)
self.assertEqual(len(self.rew_man.active_terms), 3)
def test_missing_weight(self):
"""Test the missing of weight in the config."""
# TODO: The error should be raised during the config parsing, not during the reward manager creation.
cfg = {
"term_1": RewardTermCfg(func=grilled_chicken, weight=10),
"term_2": RewardTermCfg(func=grilled_chicken_with_bbq, params={"bbq": True}),
}
with self.assertRaises(TypeError):
self.rew_man = RewardManager(cfg, self.env)
def test_invalid_reward_func_module(self):
"""Test the handling of invalid reward function's module in string representation."""
cfg = {
"term_1": RewardTermCfg(func=grilled_chicken, weight=10),
"term_2": RewardTermCfg(func=grilled_chicken_with_bbq, weight=5, params={"bbq": True}),
"term_3": RewardTermCfg(func="a:grilled_chicken_with_no_bbq", weight=0.1, params={"hot": False}),
}
with self.assertRaises(ValueError):
self.rew_man = RewardManager(cfg, self.env)
def test_invalid_reward_config(self):
"""Test the handling of invalid reward function's config parameters."""
cfg = {
"term_1": RewardTermCfg(func=grilled_chicken_with_bbq, weight=0.1, params={"hot": False}),
"term_2": RewardTermCfg(func=grilled_chicken_with_yoghurt, weight=2.0, params={"hot": False}),
}
with self.assertRaises(ValueError):
self.rew_man = RewardManager(cfg, self.env)
if __name__ == "__main__":
run_tests()
| 6,811 |
Python
| 39.307692 | 120 | 0.629276 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/deps/test_torch.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
import torch.utils.benchmark as benchmark
import unittest
from omni.isaac.lab.app import run_tests
class TestTorchOperations(unittest.TestCase):
"""Tests for assuring torch related operations used in Isaac Lab."""
def test_array_slicing(self):
"""Check that using ellipsis and slices work for torch tensors."""
size = (400, 300, 5)
my_tensor = torch.rand(size, device="cuda:0")
self.assertEqual(my_tensor[..., 0].shape, (400, 300))
self.assertEqual(my_tensor[:, :, 0].shape, (400, 300))
self.assertEqual(my_tensor[slice(None), slice(None), 0].shape, (400, 300))
with self.assertRaises(IndexError):
my_tensor[..., ..., 0]
self.assertEqual(my_tensor[0, ...].shape, (300, 5))
self.assertEqual(my_tensor[0, :, :].shape, (300, 5))
self.assertEqual(my_tensor[0, slice(None), slice(None)].shape, (300, 5))
        with self.assertRaises(IndexError):
            my_tensor[0, ..., ...]
self.assertEqual(my_tensor[..., 0, 0].shape, (400,))
self.assertEqual(my_tensor[slice(None), 0, 0].shape, (400,))
self.assertEqual(my_tensor[:, 0, 0].shape, (400,))
def test_array_circular(self):
"""Check circular buffer implementation in torch."""
size = (10, 30, 5)
my_tensor = torch.rand(size, device="cuda:0")
# roll up the tensor without cloning
my_tensor_1 = my_tensor.clone()
my_tensor_1[:, 1:, :] = my_tensor_1[:, :-1, :]
my_tensor_1[:, 0, :] = my_tensor[:, -1, :]
        # the aliased (non-cloned) assignment does not produce a proper circular shift
error = torch.max(torch.abs(my_tensor_1 - my_tensor.roll(1, dims=1)))
self.assertNotEqual(error.item(), 0.0)
self.assertFalse(torch.allclose(my_tensor_1, my_tensor.roll(1, dims=1)))
# roll up the tensor with cloning
my_tensor_2 = my_tensor.clone()
my_tensor_2[:, 1:, :] = my_tensor_2[:, :-1, :].clone()
my_tensor_2[:, 0, :] = my_tensor[:, -1, :]
# check that circular buffer works as expected
error = torch.max(torch.abs(my_tensor_2 - my_tensor.roll(1, dims=1)))
self.assertEqual(error.item(), 0.0)
self.assertTrue(torch.allclose(my_tensor_2, my_tensor.roll(1, dims=1)))
# roll up the tensor with detach operation
my_tensor_3 = my_tensor.clone()
my_tensor_3[:, 1:, :] = my_tensor_3[:, :-1, :].detach()
my_tensor_3[:, 0, :] = my_tensor[:, -1, :]
        # detach returns a view (no copy), so the assignment is still aliased and incorrect
error = torch.max(torch.abs(my_tensor_3 - my_tensor.roll(1, dims=1)))
self.assertNotEqual(error.item(), 0.0)
self.assertFalse(torch.allclose(my_tensor_3, my_tensor.roll(1, dims=1)))
# roll up the tensor with roll operation
my_tensor_4 = my_tensor.clone()
my_tensor_4 = my_tensor_4.roll(1, dims=1)
my_tensor_4[:, 0, :] = my_tensor[:, -1, :]
# check that circular buffer works as expected
error = torch.max(torch.abs(my_tensor_4 - my_tensor.roll(1, dims=1)))
self.assertEqual(error.item(), 0.0)
self.assertTrue(torch.allclose(my_tensor_4, my_tensor.roll(1, dims=1)))
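    # Editor's note: a minimal sketch of why the non-cloned assignment above fails.
    # Basic slices are views that share storage, so the right-hand side is read from
    # the same memory that is being overwritten. The helper is illustrative only and
    # is not collected by the test runner.
    @staticmethod
    def _view_aliasing_sketch():
        t = torch.zeros(2, 3)
        v = t[:, :-1]  # a view: shares the same storage as `t`
        assert v.untyped_storage().data_ptr() == t.untyped_storage().data_ptr()
        v[:] = 1.0  # writing through the view mutates `t` in place
        assert t[0, 0].item() == 1.0 and t[0, -1].item() == 0.0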
def test_array_circular_copy(self):
"""Check that circular buffer implementation in torch is copying data."""
size = (10, 30, 5)
my_tensor = torch.rand(size, device="cuda:0")
my_tensor_clone = my_tensor.clone()
# roll up the tensor
my_tensor_1 = my_tensor.clone()
my_tensor_1[:, 1:, :] = my_tensor_1[:, :-1, :].clone()
my_tensor_1[:, 0, :] = my_tensor[:, -1, :]
# change the source tensor
my_tensor[:, 0, :] = 1000
# check that circular buffer works as expected
self.assertFalse(torch.allclose(my_tensor_1, my_tensor.roll(1, dims=1)))
self.assertTrue(torch.allclose(my_tensor_1, my_tensor_clone.roll(1, dims=1)))
def test_array_multi_indexing(self):
"""Check multi-indexing works for torch tensors."""
size = (400, 300, 5)
my_tensor = torch.rand(size, device="cuda:0")
        # this fails since index arrays of shapes (4,) and (5,) cannot be broadcast together
with self.assertRaises(IndexError):
my_tensor[[0, 1, 2, 3], [0, 1, 2, 3, 4]]
def test_array_single_indexing(self):
"""Check how indexing effects the returned tensor."""
size = (400, 300, 5)
my_tensor = torch.rand(size, device="cuda:0")
# obtain a slice of the tensor
my_slice = my_tensor[0, ...]
self.assertEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr())
# obtain a slice over ranges
my_slice = my_tensor[0:2, ...]
self.assertEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr())
# obtain a slice over list
my_slice = my_tensor[[0, 1], ...]
self.assertNotEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr())
# obtain a slice over tensor
my_slice = my_tensor[torch.tensor([0, 1]), ...]
self.assertNotEqual(my_slice.untyped_storage().data_ptr(), my_tensor.untyped_storage().data_ptr())
def test_logical_or(self):
"""Test bitwise or operation."""
size = (400, 300, 5)
my_tensor_1 = torch.rand(size, device="cuda:0") > 0.5
my_tensor_2 = torch.rand(size, device="cuda:0") < 0.5
# check the speed of logical or
timer_logical_or = benchmark.Timer(
stmt="torch.logical_or(my_tensor_1, my_tensor_2)",
globals={"my_tensor_1": my_tensor_1, "my_tensor_2": my_tensor_2},
)
timer_bitwise_or = benchmark.Timer(
stmt="my_tensor_1 | my_tensor_2", globals={"my_tensor_1": my_tensor_1, "my_tensor_2": my_tensor_2}
)
print("Time for logical or:", timer_logical_or.timeit(number=1000))
print("Time for bitwise or:", timer_bitwise_or.timeit(number=1000))
# check that logical or works as expected
output_logical_or = torch.logical_or(my_tensor_1, my_tensor_2)
output_bitwise_or = my_tensor_1 | my_tensor_2
self.assertTrue(torch.allclose(output_logical_or, output_bitwise_or))
if __name__ == "__main__":
run_tests()
| 6,394 |
Python
| 40.258064 | 110 | 0.595558 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/deps/test_scipy.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# isort: off
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# isort: on
import numpy as np
import scipy.interpolate as interpolate
import unittest
from omni.isaac.lab.app import run_tests
class TestScipyOperations(unittest.TestCase):
"""Tests for assuring scipy related operations used in Isaac Lab."""
def test_interpolation(self):
"""Test scipy interpolation 2D method."""
# parameters
size = (10.0, 12.0)
horizontal_scale = 0.1
vertical_scale = 0.005
downsampled_scale = 0.2
noise_range = (-0.02, 0.1)
noise_step = 0.02
# switch parameters to discrete units
# -- horizontal scale
width_pixels = int(size[0] / horizontal_scale)
length_pixels = int(size[1] / horizontal_scale)
# -- downsampled scale
width_downsampled = int(size[0] / downsampled_scale)
length_downsampled = int(size[1] / downsampled_scale)
# -- height
height_min = int(noise_range[0] / vertical_scale)
height_max = int(noise_range[1] / vertical_scale)
height_step = int(noise_step / vertical_scale)
# create range of heights possible
height_range = np.arange(height_min, height_max + height_step, height_step)
# sample heights randomly from the range along a grid
height_field_downsampled = np.random.choice(height_range, size=(width_downsampled, length_downsampled))
# create interpolation function for the sampled heights
x = np.linspace(0, size[0] * horizontal_scale, width_downsampled)
y = np.linspace(0, size[1] * horizontal_scale, length_downsampled)
# interpolate the sampled heights to obtain the height field
x_upsampled = np.linspace(0, size[0] * horizontal_scale, width_pixels)
y_upsampled = np.linspace(0, size[1] * horizontal_scale, length_pixels)
# -- method 1: interp2d (this will be deprecated in the future 1.12 release)
func_interp2d = interpolate.interp2d(y, x, height_field_downsampled, kind="cubic")
z_upsampled_interp2d = func_interp2d(y_upsampled, x_upsampled)
# -- method 2: RectBivariateSpline (alternate to interp2d)
func_RectBiVariate = interpolate.RectBivariateSpline(x, y, height_field_downsampled)
z_upsampled_RectBivariant = func_RectBiVariate(x_upsampled, y_upsampled)
# -- method 3: RegularGridInterpolator (recommended from scipy but slow!)
# Ref: https://github.com/scipy/scipy/issues/18010
func_RegularGridInterpolator = interpolate.RegularGridInterpolator(
(x, y), height_field_downsampled, method="cubic"
)
xx_upsampled, yy_upsampled = np.meshgrid(x_upsampled, y_upsampled, indexing="ij", sparse=True)
z_upsampled_RegularGridInterpolator = func_RegularGridInterpolator((xx_upsampled, yy_upsampled))
# check if the interpolated height field is the same as the sampled height field
np.testing.assert_allclose(z_upsampled_interp2d, z_upsampled_RectBivariant, atol=1e-14)
np.testing.assert_allclose(z_upsampled_RectBivariant, z_upsampled_RegularGridInterpolator, atol=1e-14)
np.testing.assert_allclose(z_upsampled_RegularGridInterpolator, z_upsampled_interp2d, atol=1e-14)
if __name__ == "__main__":
run_tests()
| 3,468 |
Python
| 44.644736 | 111 | 0.683391 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/deps/isaacsim/check_floating_base_made_fixed.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates how to make a floating robot fixed in Isaac Sim."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.kit import SimulationApp
# add argparse arguments
parser = argparse.ArgumentParser(
description="This script shows the issue in Isaac Sim with making a floating robot fixed."
)
parser.add_argument("--headless", action="store_true", help="Run in headless mode.")
parser.add_argument("--fix-base", action="store_true", help="Whether to fix the base of the robot.")
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
simulation_app = SimulationApp({"headless": args_cli.headless})
"""Rest everything follows."""
import torch
import carb
import omni.isaac.core.utils.nucleus as nucleus_utils
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.kit.commands
import omni.physx
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.carb import set_carb_setting
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.world import World
from pxr import PhysxSchema, UsdPhysics
# check nucleus connection
if nucleus_utils.get_assets_root_path() is None:
msg = (
"Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n"
"\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus"
)
carb.log_error(msg)
raise RuntimeError(msg)
ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac"
"""Path to the `Isaac` directory on the NVIDIA Nucleus Server."""
ISAACLAB_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac/Samples/Orbit"
"""Path to the `Isaac/Samples/Orbit` directory on the NVIDIA Nucleus Server."""
"""
Main
"""
def main():
"""Spawns the ANYmal robot and makes it fixed."""
# Load kit helper
world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cpu")
# Set main camera
set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Enable hydra scene-graph instancing
# this is needed to visualize the scene when flatcache is enabled
set_carb_setting(world._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)
# Spawn things into stage
# Ground-plane
world.scene.add_default_ground_plane(prim_path="/World/defaultGroundPlane", z_position=0.0)
# Lights-1
prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0))
# Lights-2
prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0))
# -- Robot
# resolve asset
usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd"
root_prim_path = "/World/Robot/base"
# add asset
print("Loading robot from: ", usd_path)
prim_utils.create_prim(
"/World/Robot",
usd_path=usd_path,
translation=(0.0, 0.0, 0.6),
)
# create fixed joint
if args_cli.fix_base:
# get all necessary information
stage = stage_utils.get_current_stage()
root_prim = stage.GetPrimAtPath(root_prim_path)
parent_prim = root_prim.GetParent()
# here we assume that the root prim is a rigid body
        # there is no clear way to handle the case where the root prim is not a rigid body but
        # has the articulation API applied; in that case, it is unclear how to resolve the first
        # link in the kinematic tree
if not root_prim.HasAPI(UsdPhysics.RigidBodyAPI):
raise RuntimeError("The root prim does not have the RigidBodyAPI applied.")
# create fixed joint
omni.kit.commands.execute(
"CreateJointCommand",
stage=stage,
joint_type="Fixed",
from_prim=None,
to_prim=root_prim,
)
# move the root to the parent if this is a rigid body
# having a fixed joint on a rigid body makes physx treat it as a part of the maximal coordinate tree
        # if we put the joint on the parent, the physx parser treats it as a fixed-base articulation
# get parent prim
parent_prim = root_prim.GetParent()
# apply api to parent
UsdPhysics.ArticulationRootAPI.Apply(parent_prim)
PhysxSchema.PhysxArticulationAPI.Apply(parent_prim)
# copy the attributes
# -- usd attributes
root_usd_articulation_api = UsdPhysics.ArticulationRootAPI(root_prim)
for attr_name in root_usd_articulation_api.GetSchemaAttributeNames():
attr = root_prim.GetAttribute(attr_name)
parent_prim.GetAttribute(attr_name).Set(attr.Get())
# -- physx attributes
root_physx_articulation_api = PhysxSchema.PhysxArticulationAPI(root_prim)
for attr_name in root_physx_articulation_api.GetSchemaAttributeNames():
attr = root_prim.GetAttribute(attr_name)
parent_prim.GetAttribute(attr_name).Set(attr.Get())
# remove api from root
root_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI)
root_prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI)
# rename root path to parent path
root_prim_path = parent_prim.GetPath().pathString
# Setup robot
robot_view = ArticulationView(root_prim_path, name="ANYMAL")
world.scene.add(robot_view)
# Play the simulator
world.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# dummy actions
# actions = torch.zeros(robot.count, robot.num_actions, device=robot.device)
init_root_pos_w, init_root_quat_w = robot_view.get_world_poses()
# Define simulation stepping
sim_dt = world.get_physics_dt()
# episode counter
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if world.is_stopped():
break
# If simulation is paused, then skip.
if not world.is_playing():
world.step(render=False)
continue
# do reset
if count % 20 == 0:
# reset
sim_time = 0.0
count = 0
# reset root state
root_pos_w = init_root_pos_w.clone()
root_pos_w[:, :2] += torch.rand_like(root_pos_w[:, :2]) * 0.5
robot_view.set_world_poses(root_pos_w, init_root_quat_w)
# print if it is fixed base
print("Fixed base: ", robot_view._physics_view.shared_metatype.fixed_base)
print("Moving base to: ", root_pos_w[0].cpu().numpy())
print("-" * 50)
# apply random joint actions
actions = torch.rand_like(robot_view.get_joint_positions()) * 0.001
robot_view.set_joint_efforts(actions)
# perform step
world.step()
# update sim-time
sim_time += sim_dt
count += 1
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 7,173 |
Python
| 34.87 | 119 | 0.657187 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/deps/isaacsim/check_camera.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows the issue with renderer in Isaac Sim that affects episodic resets.
The first few images of every new episode are not updated. They take multiple steps to update
and have the same image as the previous episode for the first few steps.
```
# run with cube
_isaac_sim/python.sh source/extensions/omni.isaac.lab/test/deps/isaacsim/check_camera.py --scenario cube
# run with anymal
_isaac_sim/python.sh source/extensions/omni.isaac.lab/test/deps/isaacsim/check_camera.py --scenario anymal
```
"""
"""Launch Isaac Sim Simulator first."""
import argparse
# omni.isaac.lab
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(
description="This script shows the issue with renderer in Isaac Sim that affects episodic resets."
)
parser.add_argument("--gpu", action="store_true", default=False, help="Use GPU device for camera rendering output.")
parser.add_argument("--scenario", type=str, default="anymal", help="Scenario to load.", choices=["anymal", "cube"])
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import os
import random
try:
import omni.isaac.nucleus as nucleus_utils
except ModuleNotFoundError:
import omni.isaac.core.utils.nucleus as nucleus_utils
import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import GeometryPrim, RigidPrim, RigidPrimView
from omni.isaac.core.utils.carb import set_carb_setting
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.world import World
from PIL import Image, ImageChops
from pxr import Gf, UsdGeom
# check nucleus connection
if nucleus_utils.get_assets_root_path() is None:
msg = (
"Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n"
"\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus"
)
raise RuntimeError(msg)
ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac"
"""Path to the `Isaac` directory on the NVIDIA Nucleus Server."""
def main():
"""Runs a camera sensor from isaaclab."""
# Load kit helper
world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cpu")
# Set main camera
set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Enable flatcache which avoids passing data over to USD structure
# this speeds up the read-write operation of GPU buffers
if world.get_physics_context().use_gpu_pipeline:
world.get_physics_context().enable_flatcache(True)
# Enable hydra scene-graph instancing
# this is needed to visualize the scene when flatcache is enabled
set_carb_setting(world._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)
# Populate scene
# Ground
world.scene.add_default_ground_plane()
# Lights-1
prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0))
# Lights-2
prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0))
# Xform to hold objects
if args_cli.scenario == "cube":
prim_utils.create_prim("/World/Objects", "Xform")
# Random objects
for i in range(8):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
# create prim
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
_ = prim_utils.create_prim(
f"/World/Objects/Obj_{i:02d}",
prim_type,
translation=position,
scale=(0.25, 0.25, 0.25),
semantic_label=prim_type,
)
# add rigid properties
GeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True)
rigid_obj = RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0)
# cast to geom prim
geom_prim = getattr(UsdGeom, prim_type)(rigid_obj.prim)
# set random color
color = Gf.Vec3f(random.random(), random.random(), random.random())
geom_prim.CreateDisplayColorAttr()
geom_prim.GetDisplayColorAttr().Set([color])
# Setup camera sensor on the world
cam_prim_path = "/World/CameraSensor"
else:
# Robot
prim_utils.create_prim(
"/World/Robot",
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd",
translation=(0.0, 0.0, 0.6),
)
# Setup camera sensor on the robot
cam_prim_path = "/World/CameraSensor"
# Create camera
cam_prim = prim_utils.create_prim(
cam_prim_path,
prim_type="Camera",
translation=(5.0, 5.0, 5.0),
orientation=(0.33985113, 0.17591988, 0.42470818, 0.82047324),
)
_ = UsdGeom.Camera(cam_prim)
# Get render product
render_prod_path = rep.create.render_product(cam_prim_path, resolution=(640, 480))
# create annotator node
rep_registry = {}
for name in ["rgb", "distance_to_image_plane"]:
# create annotator
rep_annotator = rep.AnnotatorRegistry.get_annotator(name, device="cpu")
rep_annotator.attach(render_prod_path)
# add to registry
rep_registry[name] = rep_annotator
# Create replicator writer
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera", args_cli.scenario)
os.makedirs(output_dir, exist_ok=True)
    # Create a view over the prims we want to track
if args_cli.scenario == "cube":
view: RigidPrimView = world.scene.add(RigidPrimView("/World/Objects/.*", name="my_object"))
else:
view: ArticulationView = world.scene.add(ArticulationView("/World/Robot", name="my_object"))
# Play simulator
world.reset()
# Get initial state
if args_cli.scenario == "cube":
initial_pos, initial_quat = view.get_world_poses()
initial_joint_pos = None
initial_joint_vel = None
else:
initial_pos, initial_quat = view.get_world_poses()
initial_joint_pos = view.get_joint_positions()
initial_joint_vel = view.get_joint_velocities()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
world.step(render=True)
# Counter
count = 0
prev_im = None
# make episode directory
episode_count = 0
episode_dir = os.path.join(output_dir, f"episode_{episode_count:06d}")
os.makedirs(episode_dir, exist_ok=True)
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if world.is_stopped():
break
# If simulation is paused, then skip.
if not world.is_playing():
world.step(render=False)
continue
# Reset on intervals
if count % 25 == 0:
# reset all the state
view.set_world_poses(initial_pos, initial_quat)
if initial_joint_pos is not None:
view.set_joint_positions(initial_joint_pos)
if initial_joint_vel is not None:
view.set_joint_velocities(initial_joint_vel)
# make a new episode directory
episode_dir = os.path.join(output_dir, f"episode_{episode_count:06d}")
os.makedirs(episode_dir, exist_ok=True)
# reset counters
count = 0
episode_count += 1
# Step simulation
for _ in range(15):
world.step(render=False)
world.render()
# Update camera data
rgb_data = rep_registry["rgb"].get_data()
depth_data = rep_registry["distance_to_image_plane"].get_data()
# Show current image number
print(f"[Epi {episode_count:03d}] Current image number: {count:06d}")
# Save data
curr_im = Image.fromarray(rgb_data)
curr_im.save(os.path.join(episode_dir, f"{count:06d}_rgb.png"))
# Save diff
if prev_im is not None:
diff_im = ImageChops.difference(curr_im, prev_im)
# convert to grayscale and threshold
diff_im = diff_im.convert("L")
threshold = 30
            diff_im = diff_im.point(lambda p: 255 if p > threshold else 0)
# Save all of them together
dst_im = Image.new("RGB", (curr_im.width + prev_im.width + diff_im.width, diff_im.height))
dst_im.paste(prev_im, (0, 0))
dst_im.paste(curr_im, (prev_im.width, 0))
dst_im.paste(diff_im, (2 * prev_im.width, 0))
dst_im.save(os.path.join(episode_dir, f"{count:06d}_diff.png"))
# Save to previous
prev_im = curr_im.copy()
# Update counter
count += 1
# Print camera info
print("Received shape of rgb image: ", rgb_data.shape)
print("Received shape of depth image: ", depth_data.shape)
print("-------------------------------")
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 9,656 |
Python
| 37.019685 | 117 | 0.634631 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/deps/isaacsim/check_legged_robot_clone.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to use the cloner API from Isaac Sim.
Reference: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gym_cloner.html
"""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(
description="This script shows the issue in Isaac Sim with GPU simulation of floating robots."
)
parser.add_argument("--num_robots", type=int, default=128, help="Number of robots to spawn.")
parser.add_argument(
"--asset",
type=str,
default="isaaclab",
help="The asset source location for the robot. Can be: isaaclab, oige, custom asset path.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import os
import torch
import carb
try:
import omni.isaac.nucleus as nucleus_utils
except ModuleNotFoundError:
import omni.isaac.core.utils.nucleus as nucleus_utils
import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.cloner import GridCloner
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.carb import set_carb_setting
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.world import World
# check nucleus connection
if nucleus_utils.get_assets_root_path() is None:
msg = (
"Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n"
"\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus"
)
carb.log_error(msg)
raise RuntimeError(msg)
ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac"
"""Path to the `Isaac` directory on the NVIDIA Nucleus Server."""
ISAACLAB_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac/Samples/Orbit"
"""Path to the `Isaac/Samples/Orbit` directory on the NVIDIA Nucleus Server."""
"""
Main
"""
def main():
"""Spawns the ANYmal robot and clones it using Isaac Sim Cloner API."""
# Load kit helper
world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cuda:0")
# Set main camera
set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Enable hydra scene-graph instancing
# this is needed to visualize the scene when flatcache is enabled
set_carb_setting(world._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
cloner.define_base_env("/World/envs")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.define_prim("/World/envs/env_0")
# Spawn things into stage
# Ground-plane
world.scene.add_default_ground_plane(prim_path="/World/defaultGroundPlane", z_position=0.0)
# Lights-1
prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0))
# Lights-2
prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0))
# -- Robot
# resolve asset
if args_cli.asset == "isaaclab":
usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd"
root_prim_path = "/World/envs/env_.*/Robot/base"
elif args_cli.asset == "oige":
usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd"
root_prim_path = "/World/envs/env_.*/Robot"
elif os.path.exists(args_cli.asset):
usd_path = args_cli.asset
root_prim_path = "/World/envs/env_.*/Robot"
else:
raise ValueError(f"Invalid asset: {args_cli.asset}. Must be one of: isaaclab, oige, or a valid USD file path.")
# add asset
print("Loading robot from: ", usd_path)
prim_utils.create_prim(
"/World/envs/env_0/Robot",
usd_path=usd_path,
translation=(0.0, 0.0, 0.6),
)
# Clone the scene
num_envs = args_cli.num_robots
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
envs_positions = cloner.clone(
source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True
)
# convert environment positions to torch tensor
envs_positions = torch.tensor(envs_positions, dtype=torch.float, device=world.device)
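# note: `envs_positions` should have shape (num_envs, 3) -- the grid offsets
# generated by the cloner for each cloned environment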
# filter collisions within each environment instance
physics_scene_path = world.get_physics_context().prim_path
cloner.filter_collisions(
physics_scene_path, "/World/collisions", envs_prim_paths, global_paths=["/World/defaultGroundPlane"]
)
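# note: paths in `global_paths` (here the ground plane) keep colliding with
# every environment, while collisions between the cloned envs are filtered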
# note: `usd_path` and `root_prim_path` were already resolved above
# Setup robot
robot_view = ArticulationView(root_prim_path, name="ANYMAL")
world.scene.add(robot_view)
# Play the simulator
world.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# dummy actions
# actions = torch.zeros(robot.count, robot.num_actions, device=robot.device)
# Define simulation stepping
sim_dt = world.get_physics_dt()
# simulation time counter
sim_time = 0.0
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if world.is_stopped():
break
# If simulation is paused, then skip.
if not world.is_playing():
world.step(render=False)
continue
# perform step
world.step()
# update sim-time
sim_time += sim_dt
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 6,205 |
Python
| 32.545946 | 117 | 0.67913 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/deps/isaacsim/check_rep_texture_randomizer.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows how to use replicator to randomly change the textures of a USD scene.
Note:
Currently this script fails since cloner does not support changing textures of cloned
USD prims. This is because the prims are cloned using `Sdf.ChangeBlock` which does not
allow individual texture changes.
Usage:
.. code-block:: bash
./isaaclab.sh -p source/extensions/omni.isaac.lab/test/deps/isaacsim/check_rep_texture_randomizer.py
"""
"""Launch Isaac Sim Simulator first."""
import argparse
# omni.isaac.lab
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(
description="This script shows how to use replicator to randomly change the textures of a USD scene."
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep
from omni.isaac.cloner import GridCloner
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.viewports import set_camera_view
def main():
"""Spawn a bunch of balls and randomly change their textures."""
# Load kit helper
sim_params = {
"use_gpu": True,
"use_gpu_pipeline": True,
"use_flatcache": True, # deprecated from Isaac Sim 2023.1 onwards
"use_fabric": True, # used from Isaac Sim 2023.1 onwards
"enable_scene_query_support": True,
}
sim = SimulationContext(
physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, sim_params=sim_params, backend="torch", device="cuda:0"
)
# Set main camera
set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5])
# Parameters
num_balls = 128
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
cloner.define_base_env("/World/envs")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.define_prim("/World/envs/env_0")
# Define the scene
# -- Ball
DynamicSphere(prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25)
# Clone the scene
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls)
env_positions = cloner.clone(
source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True, copy_from_source=True
)
physics_scene_path = sim.get_physics_context().prim_path
cloner.filter_collisions(
physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"]
)
# Use replicator to randomize color on the spheres
with rep.new_layer():
# Define a function to get all the shapes
def get_shapes():
shapes = rep.get.prims(path_pattern="/World/envs/env_.*/ball")
with shapes:
rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
return shapes.node
# Register the function
rep.randomizer.register(get_shapes)
# Specify the frequency of randomization
with rep.trigger.on_frame():
rep.randomizer.get_shapes()
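# note: registering `get_shapes` above makes it callable as
# rep.randomizer.get_shapes(); the on_frame trigger then re-runs the
# randomization graph on every rendered frame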
# Set ball positions over terrain origins
# Create a view over all the balls
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
# cache initial state of the balls
ball_initial_positions = torch.tensor(env_positions, dtype=torch.float, device=sim.device)
ball_initial_positions[:, 2] += 5.0
# set initial poses
# note: setting here writes to USD :)
ball_view.set_world_poses(positions=ball_initial_positions)
# Play simulator
sim.reset()
# Step replicator to randomize colors
rep.orchestrator.step(pause_timeline=False)
# Stop replicator to prevent further randomization
rep.orchestrator.stop()
# Pause simulator at the beginning for inspection
sim.pause()
# Initialize the ball views for physics simulation
ball_view.initialize()
ball_initial_velocities = ball_view.get_velocities()
# Create a counter for resetting the scene
step_count = 0
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if sim.is_stopped():
break
# If simulation is paused, then skip.
if not sim.is_playing():
sim.step()
continue
# Reset the scene
if step_count % 500 == 0:
# reset the balls
ball_view.set_world_poses(positions=ball_initial_positions)
ball_view.set_velocities(ball_initial_velocities)
# reset the counter
step_count = 0
# Step simulation
sim.step()
# Update counter
step_count += 1
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 5,377 |
Python
| 31.593939 | 119 | 0.670634 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/controllers/test_differential_ik.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import torch
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.cloner import GridCloner
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.controllers import DifferentialIKController, DifferentialIKControllerCfg
from omni.isaac.lab.utils.math import compute_pose_error, subtract_frame_transforms
##
# Pre-defined configs
##
from omni.isaac.lab_assets import FRANKA_PANDA_HIGH_PD_CFG, UR10_CFG # isort:skip
class TestDifferentialIKController(unittest.TestCase):
"""Test fixture for checking that differential IK controller tracks commands properly."""
def setUp(self):
"""Create a blank new stage for each test."""
# Wait for spawning
stage_utils.create_new_stage()
# Constants
self.num_envs = 128
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=0.01)
self.sim = sim_utils.SimulationContext(sim_cfg)
# TODO: Remove this once we have a better way to handle this.
self.sim._app_control_on_stop_handle = None
# Create a ground plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/GroundPlane", cfg)
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
cloner.define_base_env("/World/envs")
self.env_prim_paths = cloner.generate_paths("/World/envs/env", self.num_envs)
# create source prim
prim_utils.define_prim(self.env_prim_paths[0], "Xform")
# clone the env xform
self.env_origins = cloner.clone(
source_prim_path=self.env_prim_paths[0],
prim_paths=self.env_prim_paths,
replicate_physics=True,
)
# Define goals for the arm
ee_goals_set = [
[0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
[0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
[0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
]
self.ee_pose_b_des_set = torch.tensor(ee_goals_set, device=self.sim.device)
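# note: each goal above is (x, y, z, qw, qx, qy, qz) in the robot base frame;
# the (w, x, y, z) quaternion ordering follows Isaac Lab's math utilities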
def tearDown(self):
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
self.sim.clear()
self.sim.clear_all_callbacks()
self.sim.clear_instance()
"""
Test fixtures.
"""
def test_franka_ik_pose_abs(self):
"""Test IK controller for Franka arm with Franka hand."""
# Create robot instance
robot_cfg = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="/World/envs/env_.*/Robot")
robot = Articulation(cfg=robot_cfg)
# Create IK controller
diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=self.num_envs, device=self.sim.device)
# Run the controller and check that it converges to the goal
self._run_ik_controller(robot, diff_ik_controller, "panda_hand", ["panda_joint.*"])
def test_ur10_ik_pose_abs(self):
"""Test IK controller for UR10 arm."""
# Create robot instance
robot_cfg = UR10_CFG.replace(prim_path="/World/envs/env_.*/Robot")
robot_cfg.spawn.rigid_props.disable_gravity = True
robot = Articulation(cfg=robot_cfg)
# Create IK controller
diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=self.num_envs, device=self.sim.device)
# Run the controller and check that it converges to the goal
self._run_ik_controller(robot, diff_ik_controller, "ee_link", [".*"])
"""
Helper functions.
"""
def _run_ik_controller(
self,
robot: Articulation,
diff_ik_controller: DifferentialIKController,
ee_frame_name: str,
arm_joint_names: list[str],
):
# Define simulation stepping
sim_dt = self.sim.get_physics_dt()
# Play the simulator
self.sim.reset()
# Obtain the frame index of the end-effector
ee_frame_idx = robot.find_bodies(ee_frame_name)[0][0]
ee_jacobi_idx = ee_frame_idx - 1
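# note: for a fixed-base articulation, physx excludes the root body from the
# jacobian, so the jacobian row of a body sits at (body index - 1)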
# Obtain joint indices
arm_joint_ids = robot.find_joints(arm_joint_names)[0]
# Update existing buffers
# Note: We need to update buffers before the first step for the controller.
robot.update(dt=sim_dt)
# Track the given command
current_goal_idx = 0
# Current goal for the arm
ee_pose_b_des = torch.zeros(self.num_envs, diff_ik_controller.action_dim, device=self.sim.device)
ee_pose_b_des[:] = self.ee_pose_b_des_set[current_goal_idx]
# Compute current pose of the end-effector
ee_pose_w = robot.data.body_state_w[:, ee_frame_idx, 0:7]
root_pose_w = robot.data.root_state_w[:, 0:7]
ee_pos_b, ee_quat_b = subtract_frame_transforms(
root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
)
# Now we are ready!
for count in range(1500):
# reset every 250 steps
if count % 250 == 0:
# check that we converged to the goal
if count > 0:
pos_error, rot_error = compute_pose_error(
ee_pos_b, ee_quat_b, ee_pose_b_des[:, 0:3], ee_pose_b_des[:, 3:7]
)
pos_error_norm = torch.norm(pos_error, dim=-1)
rot_error_norm = torch.norm(rot_error, dim=-1)
# desired error (zero)
des_error = torch.zeros_like(pos_error_norm)
# check convergence
torch.testing.assert_close(pos_error_norm, des_error, rtol=0.0, atol=1e-3)
torch.testing.assert_close(rot_error_norm, des_error, rtol=0.0, atol=1e-3)
# reset joint state
joint_pos = robot.data.default_joint_pos.clone()
joint_vel = robot.data.default_joint_vel.clone()
# joint_pos *= sample_uniform(0.9, 1.1, joint_pos.shape, joint_pos.device)
robot.write_joint_state_to_sim(joint_pos, joint_vel)
robot.set_joint_position_target(joint_pos)
robot.write_data_to_sim()
robot.reset()
# reset actions
ee_pose_b_des[:] = self.ee_pose_b_des_set[current_goal_idx]
joint_pos_des = joint_pos[:, arm_joint_ids].clone()
# update goal for next iteration
current_goal_idx = (current_goal_idx + 1) % len(self.ee_pose_b_des_set)
# set the controller commands
diff_ik_controller.reset()
diff_ik_controller.set_command(ee_pose_b_des)
else:
# at reset, the jacobians are not updated to the latest state
# so we MUST skip the first step
# obtain quantities from simulation
jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids]
ee_pose_w = robot.data.body_state_w[:, ee_frame_idx, 0:7]
root_pose_w = robot.data.root_state_w[:, 0:7]
joint_pos = robot.data.joint_pos[:, arm_joint_ids]
# compute frame in root frame
ee_pos_b, ee_quat_b = subtract_frame_transforms(
root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
)
# compute the joint commands
joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)
# apply actions
robot.set_joint_position_target(joint_pos_des, arm_joint_ids)
robot.write_data_to_sim()
# perform step
self.sim.step(render=False)
# update buffers
robot.update(sim_dt)
if __name__ == "__main__":
run_tests()
| 8,419 |
Python
| 39.676328 | 114 | 0.591994 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sim/test_spawn_from_files.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.app import AppLauncher, run_tests
"""Launch Isaac Sim Simulator first."""
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension, get_extension_path_from_name
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
class TestSpawningFromFiles(unittest.TestCase):
"""Test fixture for checking spawning of USD references from files with different settings."""
def setUp(self) -> None:
"""Create a blank new stage for each test."""
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.1
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
# Wait for spawning
stage_utils.update_stage()
def tearDown(self) -> None:
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
self.sim.clear()
self.sim.clear_all_callbacks()
self.sim.clear_instance()
"""
Basic spawning.
"""
def test_spawn_usd(self):
"""Test loading prim from Usd file."""
# Spawn cone
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd")
prim = cfg.func("/World/Franka", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/World/Franka"))
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
def test_spawn_usd_fails(self):
"""Test loading prim from Usd file fails when asset usd path is invalid."""
# Spawn cone
cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda2_instanceable.usd")
with self.assertRaises(FileNotFoundError):
cfg.func("/World/Franka", cfg)
def test_spawn_urdf(self):
"""Test loading prim from URDF file."""
# retrieve path to urdf importer extension
enable_extension("omni.importer.urdf")
extension_path = get_extension_path_from_name("omni.importer.urdf")
# Spawn franka from URDF
cfg = sim_utils.UrdfFileCfg(
asset_path=f"{extension_path}/data/urdf/robots/franka_description/robots/panda_arm_hand.urdf", fix_base=True
)
prim = cfg.func("/World/Franka", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/World/Franka"))
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
def test_spawn_ground_plane(self):
"""Test loading prim for the ground plane from grid world USD."""
# Spawn ground plane
cfg = sim_utils.GroundPlaneCfg(color=(0.1, 0.1, 0.1), size=(10.0, 10.0))
prim = cfg.func("/World/ground_plane", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/World/ground_plane"))
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Xform")
if __name__ == "__main__":
run_tests()
| 3,569 |
Python
| 35.428571 | 120 | 0.660129 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sim/test_urdf_converter.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
config = {"headless": True}
simulation_app = AppLauncher(config).app
"""Rest everything follows."""
import math
import numpy as np
import os
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.extensions import enable_extension, get_extension_path_from_name
from omni.isaac.lab.sim.converters import UrdfConverter, UrdfConverterCfg
class TestUrdfConverter(unittest.TestCase):
"""Test fixture for the UrdfConverter class."""
def setUp(self):
"""Create a blank new stage for each test."""
# Create a new stage
stage_utils.create_new_stage()
# retrieve path to urdf importer extension
enable_extension("omni.importer.urdf")
extension_path = get_extension_path_from_name("omni.importer.urdf")
# default configuration
self.config = UrdfConverterCfg(
asset_path=f"{extension_path}/data/urdf/robots/franka_description/robots/panda_arm_hand.urdf",
fix_base=True,
)
# Simulation time-step
self.dt = 0.01
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
def tearDown(self) -> None:
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
# cleanup stage and context
self.sim.clear()
self.sim.clear_all_callbacks()
self.sim.clear_instance()
def test_no_change(self):
"""Call conversion twice. This should not generate a new USD file."""
urdf_converter = UrdfConverter(self.config)
time_usd_file_created = os.stat(urdf_converter.usd_path).st_mtime_ns
# no change to the config; only define the usd directory
new_config = self.config
new_config.usd_dir = urdf_converter.usd_dir
# convert to usd but this time in the same directory as previous step
new_urdf_converter = UrdfConverter(new_config)
new_time_usd_file_created = os.stat(new_urdf_converter.usd_path).st_mtime_ns
self.assertEqual(time_usd_file_created, new_time_usd_file_created)
def test_config_change(self):
"""Call conversion twice but change the config in the second call. This should generate a new USD file."""
urdf_converter = UrdfConverter(self.config)
time_usd_file_created = os.stat(urdf_converter.usd_path).st_mtime_ns
# change the config
new_config = self.config
new_config.fix_base = not self.config.fix_base
# define the usd directory
new_config.usd_dir = urdf_converter.usd_dir
# convert to usd but this time in the same directory as previous step
new_urdf_converter = UrdfConverter(new_config)
new_time_usd_file_created = os.stat(new_urdf_converter.usd_path).st_mtime_ns
self.assertNotEqual(time_usd_file_created, new_time_usd_file_created)
def test_create_prim_from_usd(self):
"""Call conversion and create a prim from it."""
urdf_converter = UrdfConverter(self.config)
prim_path = "/World/Robot"
prim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path)
self.assertTrue(prim_utils.is_prim_path_valid(prim_path))
def test_config_drive_type(self):
"""Change the drive mechanism of the robot to be position."""
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(test_dir, "output", "urdf_converter")
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# change the config
self.config.force_usd_conversion = True
self.config.default_drive_type = "position"
self.config.default_drive_stiffness = 400.0
self.config.default_drive_damping = 40.0
self.config.override_joint_dynamics = True
self.config.usd_dir = output_dir
urdf_converter = UrdfConverter(self.config)
# check the drive type of the robot
prim_path = "/World/Robot"
prim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path)
# access the robot
robot = ArticulationView(prim_path, reset_xform_properties=False)
# play the simulator and initialize the robot
self.sim.reset()
robot.initialize()
# check drive values for the robot (read from physx)
drive_stiffness, drive_damping = robot.get_gains()
# -- for the arm (revolute joints)
# the user provides the values in radians but the simulator stores them in degrees
expected_drive_stiffness = math.degrees(self.config.default_drive_stiffness)
expected_drive_damping = math.degrees(self.config.default_drive_damping)
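# for instance, the configured stiffness of 400.0 should be read back as
# math.degrees(400.0) ~= 22918.3 once physx reports it in degrees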
np.testing.assert_array_equal(drive_stiffness[:, :7], expected_drive_stiffness)
np.testing.assert_array_equal(drive_damping[:, :7], expected_drive_damping)
# -- for the hand (prismatic joints)
# note: from isaac sim 2023.1, the test asset has mimic joints for the hand
# so the mimic joint doesn't have drive values
expected_drive_stiffness = self.config.default_drive_stiffness
expected_drive_damping = self.config.default_drive_damping
np.testing.assert_array_equal(drive_stiffness[:, 7], expected_drive_stiffness)
np.testing.assert_array_equal(drive_damping[:, 7], expected_drive_damping)
# check drive values for the robot (read from usd)
self.sim.stop()
drive_stiffness, drive_damping = robot.get_gains()
# -- for the arm (revolute joints)
# the user provides the values in radians but the simulator stores them in degrees
expected_drive_stiffness = math.degrees(self.config.default_drive_stiffness)
expected_drive_damping = math.degrees(self.config.default_drive_damping)
np.testing.assert_array_equal(drive_stiffness[:, :7], expected_drive_stiffness)
np.testing.assert_array_equal(drive_damping[:, :7], expected_drive_damping)
# -- for the hand (prismatic joints)
# note: from isaac sim 2023.1, the test asset has mimic joints for the hand
# so the mimic joint doesn't have drive values
expected_drive_stiffness = self.config.default_drive_stiffness
expected_drive_damping = self.config.default_drive_damping
np.testing.assert_array_equal(drive_stiffness[:, 7], expected_drive_stiffness)
np.testing.assert_array_equal(drive_damping[:, 7], expected_drive_damping)
if __name__ == "__main__":
run_tests()
| 7,002 |
Python
| 41.701219 | 114 | 0.67595 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sim/test_schemas.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext
from pxr import UsdPhysics
import omni.isaac.lab.sim.schemas as schemas
from omni.isaac.lab.sim.utils import find_global_fixed_joint_prim
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.string import to_camel_case
class TestPhysicsSchema(unittest.TestCase):
"""Test fixture for checking schemas modifications through Isaac Lab."""
def setUp(self) -> None:
"""Create a blank new stage for each test."""
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.1
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
# Set some default values for test
self.arti_cfg = schemas.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
articulation_enabled=True,
solver_position_iteration_count=4,
solver_velocity_iteration_count=1,
sleep_threshold=1.0,
stabilization_threshold=5.0,
fix_root_link=False,
)
self.rigid_cfg = schemas.RigidBodyPropertiesCfg(
rigid_body_enabled=True,
kinematic_enabled=False,
disable_gravity=False,
linear_damping=0.1,
angular_damping=0.5,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=10.0,
max_contact_impulse=10.0,
enable_gyroscopic_forces=True,
retain_accelerations=True,
solver_position_iteration_count=8,
solver_velocity_iteration_count=1,
sleep_threshold=1.0,
stabilization_threshold=6.0,
)
self.collision_cfg = schemas.CollisionPropertiesCfg(
collision_enabled=True,
contact_offset=0.05,
rest_offset=0.001,
min_torsional_patch_radius=0.1,
torsional_patch_radius=1.0,
)
self.mass_cfg = schemas.MassPropertiesCfg(mass=1.0, density=100.0)
self.joint_cfg = schemas.JointDrivePropertiesCfg(drive_type="acceleration")
def tearDown(self) -> None:
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
self.sim.clear()
self.sim.clear_all_callbacks()
self.sim.clear_instance()
def test_valid_properties_cfg(self):
"""Test that all the config instances have non-None values.
This is to ensure that we check that all the properties of the schema are set.
"""
for cfg in [self.arti_cfg, self.rigid_cfg, self.collision_cfg, self.mass_cfg, self.joint_cfg]:
# check nothing is none
for k, v in cfg.__dict__.items():
self.assertIsNotNone(v, f"{cfg.__class__.__name__}:{k} is None. Please make sure schemas are valid.")
def test_modify_properties_on_invalid_prim(self):
"""Test modifying properties on a prim that does not exist."""
# set properties
with self.assertRaises(ValueError):
schemas.modify_rigid_body_properties("/World/asset_xyz", self.rigid_cfg)
def test_modify_properties_on_articulation_instanced_usd(self):
"""Test modifying properties on articulation instanced usd.
In this case, modifying collision properties on the articulation instanced usd will fail.
"""
# spawn asset to the stage
asset_usd_file = f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_instanceable.usd"
prim_utils.create_prim("/World/asset_instanced", usd_path=asset_usd_file, translation=(0.0, 0.0, 0.62))
# set properties on the asset and check all properties are set
schemas.modify_articulation_root_properties("/World/asset_instanced", self.arti_cfg)
schemas.modify_rigid_body_properties("/World/asset_instanced", self.rigid_cfg)
schemas.modify_mass_properties("/World/asset_instanced", self.mass_cfg)
schemas.modify_joint_drive_properties("/World/asset_instanced", self.joint_cfg)
# validate the properties
self._validate_articulation_properties_on_prim("/World/asset_instanced", has_default_fixed_root=False)
self._validate_rigid_body_properties_on_prim("/World/asset_instanced")
self._validate_mass_properties_on_prim("/World/asset_instanced")
self._validate_joint_drive_properties_on_prim("/World/asset_instanced")
# make a fixed joint
# note: for this asset, it doesn't work because the root is not a rigid body
self.arti_cfg.fix_root_link = True
with self.assertRaises(NotImplementedError):
schemas.modify_articulation_root_properties("/World/asset_instanced", self.arti_cfg)
def test_modify_properties_on_articulation_usd(self):
"""Test setting properties on articulation usd."""
# spawn asset to the stage
asset_usd_file = f"{ISAAC_NUCLEUS_DIR}/Robots/Franka/franka.usd"
prim_utils.create_prim("/World/asset", usd_path=asset_usd_file, translation=(0.0, 0.0, 0.62))
# set properties on the asset and check all properties are set
schemas.modify_articulation_root_properties("/World/asset", self.arti_cfg)
schemas.modify_rigid_body_properties("/World/asset", self.rigid_cfg)
schemas.modify_collision_properties("/World/asset", self.collision_cfg)
schemas.modify_mass_properties("/World/asset", self.mass_cfg)
schemas.modify_joint_drive_properties("/World/asset", self.joint_cfg)
# validate the properties
self._validate_articulation_properties_on_prim("/World/asset", has_default_fixed_root=True)
self._validate_rigid_body_properties_on_prim("/World/asset")
self._validate_collision_properties_on_prim("/World/asset")
self._validate_mass_properties_on_prim("/World/asset")
self._validate_joint_drive_properties_on_prim("/World/asset")
# make a fixed joint
self.arti_cfg.fix_root_link = True
schemas.modify_articulation_root_properties("/World/asset", self.arti_cfg)
# validate the properties
self._validate_articulation_properties_on_prim("/World/asset", has_default_fixed_root=True)
def test_defining_rigid_body_properties_on_prim(self):
"""Test defining rigid body properties on a prim."""
# create a prim
prim_utils.create_prim("/World/parent", prim_type="XForm")
# spawn a prim
prim_utils.create_prim("/World/cube1", prim_type="Cube", translation=(0.0, 0.0, 0.62))
# set properties on the asset and check all properties are set
schemas.define_rigid_body_properties("/World/cube1", self.rigid_cfg)
schemas.define_collision_properties("/World/cube1", self.collision_cfg)
schemas.define_mass_properties("/World/cube1", self.mass_cfg)
# validate the properties
self._validate_rigid_body_properties_on_prim("/World/cube1")
self._validate_collision_properties_on_prim("/World/cube1")
self._validate_mass_properties_on_prim("/World/cube1")
# spawn another prim
prim_utils.create_prim("/World/cube2", prim_type="Cube", translation=(1.0, 1.0, 0.62))
# set properties on the asset and check all properties are set
schemas.define_rigid_body_properties("/World/cube2", self.rigid_cfg)
schemas.define_collision_properties("/World/cube2", self.collision_cfg)
# validate the properties
self._validate_rigid_body_properties_on_prim("/World/cube2")
self._validate_collision_properties_on_prim("/World/cube2")
# check if we can play
self.sim.reset()
for _ in range(100):
self.sim.step()
def test_defining_articulation_properties_on_prim(self):
"""Test defining articulation properties on a prim."""
# create a parent articulation
prim_utils.create_prim("/World/parent", prim_type="Xform")
schemas.define_articulation_root_properties("/World/parent", self.arti_cfg)
# validate the properties
self._validate_articulation_properties_on_prim("/World/parent", has_default_fixed_root=False)
# create a child articulation
prim_utils.create_prim("/World/parent/child", prim_type="Cube", translation=(0.0, 0.0, 0.62))
schemas.define_rigid_body_properties("/World/parent/child", self.rigid_cfg)
schemas.define_mass_properties("/World/parent/child", self.mass_cfg)
# check if we can play
self.sim.reset()
for _ in range(100):
self.sim.step()
"""
Helper functions.
"""
def _validate_articulation_properties_on_prim(
self, prim_path: str, has_default_fixed_root: bool, verbose: bool = False
):
"""Validate the articulation properties on the prim.
If :attr:`has_default_fixed_root` is True, then the asset already has a fixed root link. This is used to check the
expected behavior of the fixed root link configuration.
"""
# the root prim
root_prim = prim_utils.get_prim_at_path(prim_path)
# check articulation properties are set correctly
for attr_name, attr_value in self.arti_cfg.__dict__.items():
# skip names we know are not present
if attr_name == "func":
continue
# handle fixed root link
if attr_name == "fix_root_link" and attr_value is not None:
# obtain the fixed joint prim
fixed_joint_prim = find_global_fixed_joint_prim(prim_path)
# if asset does not have a fixed root link then check if the joint is created
if not has_default_fixed_root:
if attr_value:
self.assertIsNotNone(fixed_joint_prim)
else:
self.assertIsNone(fixed_joint_prim)
else:
# check a joint exists
self.assertIsNotNone(fixed_joint_prim)
# check if the joint is enabled or disabled
is_enabled = fixed_joint_prim.GetJointEnabledAttr().Get()
self.assertEqual(is_enabled, attr_value)
# skip the rest of the checks
continue
# convert attribute name in prim to cfg name
prim_prop_name = f"physxArticulation:{to_camel_case(attr_name, to='cC')}"
# validate the values
self.assertAlmostEqual(
root_prim.GetAttribute(prim_prop_name).Get(),
attr_value,
places=5,
msg=f"Failed setting for {prim_prop_name}",
)
def _validate_rigid_body_properties_on_prim(self, prim_path: str, verbose: bool = False):
"""Validate the rigid body properties on the prim.
Note:
Right now this function exploits the hierarchy in the asset to check the properties. This is not a
fool-proof way of checking the properties.
"""
# the root prim
root_prim = prim_utils.get_prim_at_path(prim_path)
# check rigid body properties are set correctly
for link_prim in root_prim.GetChildren():
if UsdPhysics.RigidBodyAPI(link_prim):
for attr_name, attr_value in self.rigid_cfg.__dict__.items():
# skip names we know are not present
if attr_name in ["func", "rigid_body_enabled", "kinematic_enabled"]:
continue
# convert attribute name in prim to cfg name
prim_prop_name = f"physxRigidBody:{to_camel_case(attr_name, to='cC')}"
# validate the values
self.assertAlmostEqual(
link_prim.GetAttribute(prim_prop_name).Get(),
attr_value,
places=5,
msg=f"Failed setting for {prim_prop_name}",
)
elif verbose:
print(f"Skipping prim {link_prim.GetPrimPath()} as it is not a rigid body.")
def _validate_collision_properties_on_prim(self, prim_path: str, verbose: bool = False):
"""Validate the collision properties on the prim.
Note:
Right now this function exploits the hierarchy in the asset to check the properties. This is not a
fool-proof way of checking the properties.
"""
# the root prim
root_prim = prim_utils.get_prim_at_path(prim_path)
# check collision properties are set correctly
for link_prim in root_prim.GetChildren():
for mesh_prim in link_prim.GetChildren():
if UsdPhysics.CollisionAPI(mesh_prim):
for attr_name, attr_value in self.collision_cfg.__dict__.items():
# skip names we know are not present
if attr_name in ["func", "collision_enabled"]:
continue
# convert attribute name in prim to cfg name
prim_prop_name = f"physxCollision:{to_camel_case(attr_name, to='cC')}"
# validate the values
self.assertAlmostEqual(
mesh_prim.GetAttribute(prim_prop_name).Get(),
attr_value,
places=5,
msg=f"Failed setting for {prim_prop_name}",
)
elif verbose:
print(f"Skipping prim {mesh_prim.GetPrimPath()} as it is not a collision mesh.")
def _validate_mass_properties_on_prim(self, prim_path: str, verbose: bool = False):
"""Validate the mass properties on the prim.
Note:
Right now this function exploits the hierarchy in the asset to check the properties. This is not a
fool-proof way of checking the properties.
"""
# the root prim
root_prim = prim_utils.get_prim_at_path(prim_path)
# check rigid body mass properties are set correctly
for link_prim in root_prim.GetChildren():
if UsdPhysics.MassAPI(link_prim):
for attr_name, attr_value in self.mass_cfg.__dict__.items():
# skip names we know are not present
if attr_name in ["func"]:
continue
# print(link_prim.GetProperties())
prim_prop_name = f"physics:{to_camel_case(attr_name, to='cC')}"
# validate the values
self.assertAlmostEqual(
link_prim.GetAttribute(prim_prop_name).Get(),
attr_value,
places=5,
msg=f"Failed setting for {prim_prop_name}",
)
elif verbose:
print(f"Skipping prim {link_prim.GetPrimPath()} as it is not a mass api.")
def _validate_joint_drive_properties_on_prim(self, prim_path: str, verbose: bool = False):
"""Validate the mass properties on the prim.
Note:
Right now this function exploits the hierarchy in the asset to check the properties. This is not a
fool-proof way of checking the properties.
"""
# the root prim
root_prim = prim_utils.get_prim_at_path(prim_path)
# check joint drive properties are set correctly
for link_prim in root_prim.GetAllChildren():
for joint_prim in link_prim.GetChildren():
if joint_prim.IsA(UsdPhysics.PrismaticJoint) or joint_prim.IsA(UsdPhysics.RevoluteJoint):
# check it has drive API
self.assertTrue(joint_prim.HasAPI(UsdPhysics.DriveAPI))
# iterate over the joint properties
for attr_name, attr_value in self.joint_cfg.__dict__.items():
# skip names we know are not present
if attr_name == "func":
continue
# manually check joint type
if attr_name == "drive_type":
if joint_prim.IsA(UsdPhysics.PrismaticJoint):
prim_attr_name = "drive:linear:physics:type"
elif joint_prim.IsA(UsdPhysics.RevoluteJoint):
prim_attr_name = "drive:angular:physics:type"
else:
raise ValueError(f"Unknown joint type for prim {joint_prim.GetPrimPath()}")
# check the value
self.assertEqual(attr_value, joint_prim.GetAttribute(prim_attr_name).Get())
continue
elif verbose:
print(f"Skipping prim {joint_prim.GetPrimPath()} as it is not a joint drive api.")
if __name__ == "__main__":
run_tests()
| 17,603 |
Python
| 46.967302 | 122 | 0.600579 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sim/test_spawn_materials.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext
from pxr import UsdPhysics, UsdShade
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.utils.assets import NVIDIA_NUCLEUS_DIR
class TestSpawningMaterials(unittest.TestCase):
"""Test fixture for checking spawning of materials."""
def setUp(self) -> None:
"""Create a blank new stage for each test."""
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.1
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
# Wait for spawning
stage_utils.update_stage()
def tearDown(self) -> None:
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
self.sim.clear()
self.sim.clear_all_callbacks()
self.sim.clear_instance()
def test_spawn_preview_surface(self):
"""Test spawning preview surface."""
# Spawn preview surface
cfg = sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0))
prim = cfg.func("/Looks/PreviewSurface", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/Looks/PreviewSurface"))
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Shader")
# Check properties
self.assertEqual(prim.GetAttribute("inputs:diffuseColor").Get(), cfg.diffuse_color)
def test_spawn_mdl_material(self):
"""Test spawning mdl material."""
# Spawn mdl material
cfg = sim_utils.materials.MdlFileCfg(
mdl_path=f"{NVIDIA_NUCLEUS_DIR}/Materials/Base/Metals/Aluminum_Anodized.mdl",
project_uvw=True,
albedo_brightness=0.5,
)
prim = cfg.func("/Looks/MdlMaterial", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/Looks/MdlMaterial"))
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Shader")
# Check properties
self.assertEqual(prim.GetAttribute("inputs:project_uvw").Get(), cfg.project_uvw)
self.assertEqual(prim.GetAttribute("inputs:albedo_brightness").Get(), cfg.albedo_brightness)
def test_spawn_glass_mdl_material(self):
"""Test spawning a glass mdl material."""
# Spawn mdl material
cfg = sim_utils.materials.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0))
prim = cfg.func("/Looks/GlassMaterial", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/Looks/GlassMaterial"))
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), "Shader")
# Check properties
self.assertEqual(prim.GetAttribute("inputs:thin_walled").Get(), cfg.thin_walled)
self.assertEqual(prim.GetAttribute("inputs:glass_ior").Get(), cfg.glass_ior)
self.assertEqual(prim.GetAttribute("inputs:glass_color").Get(), cfg.glass_color)
def test_spawn_rigid_body_material(self):
"""Test spawning a rigid body material."""
# spawn physics material
cfg = sim_utils.materials.RigidBodyMaterialCfg(
dynamic_friction=1.5,
restitution=1.5,
static_friction=0.5,
restitution_combine_mode="max",
friction_combine_mode="max",
improve_patch_friction=True,
)
prim = cfg.func("/Looks/RigidBodyMaterial", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/Looks/RigidBodyMaterial"))
# Check properties
self.assertEqual(prim.GetAttribute("physics:staticFriction").Get(), cfg.static_friction)
self.assertEqual(prim.GetAttribute("physics:dynamicFriction").Get(), cfg.dynamic_friction)
self.assertEqual(prim.GetAttribute("physics:restitution").Get(), cfg.restitution)
self.assertEqual(prim.GetAttribute("physxMaterial:improvePatchFriction").Get(), cfg.improve_patch_friction)
self.assertEqual(prim.GetAttribute("physxMaterial:restitutionCombineMode").Get(), cfg.restitution_combine_mode)
self.assertEqual(prim.GetAttribute("physxMaterial:frictionCombineMode").Get(), cfg.friction_combine_mode)
def test_apply_rigid_body_material_on_visual_material(self):
"""Test applying a rigid body material on a visual material."""
# Spawn mdl material
cfg = sim_utils.materials.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0))
prim = cfg.func("/Looks/Material", cfg)
# spawn physics material
cfg = sim_utils.materials.RigidBodyMaterialCfg(
dynamic_friction=1.5,
restitution=1.5,
static_friction=0.5,
restitution_combine_mode="max",
friction_combine_mode="max",
improve_patch_friction=True,
)
prim = cfg.func("/Looks/Material", cfg)
# Check validity
self.assertTrue(prim.IsValid())
self.assertTrue(prim_utils.is_prim_path_valid("/Looks/Material"))
# Check properties
self.assertEqual(prim.GetAttribute("physics:staticFriction").Get(), cfg.static_friction)
self.assertEqual(prim.GetAttribute("physics:dynamicFriction").Get(), cfg.dynamic_friction)
self.assertEqual(prim.GetAttribute("physics:restitution").Get(), cfg.restitution)
self.assertEqual(prim.GetAttribute("physxMaterial:improvePatchFriction").Get(), cfg.improve_patch_friction)
self.assertEqual(prim.GetAttribute("physxMaterial:restitutionCombineMode").Get(), cfg.restitution_combine_mode)
self.assertEqual(prim.GetAttribute("physxMaterial:frictionCombineMode").Get(), cfg.friction_combine_mode)
def test_bind_prim_to_material(self):
"""Test binding a rigid body material on a mesh prim."""
# create a mesh prim
object_prim = prim_utils.create_prim("/World/Geometry/box", "Cube")
UsdPhysics.CollisionAPI.Apply(object_prim)
# create a visual material
visual_material_cfg = sim_utils.GlassMdlCfg(glass_ior=1.0, thin_walled=True)
visual_material_cfg.func("/World/Looks/glassMaterial", visual_material_cfg)
# create a physics material
physics_material_cfg = sim_utils.RigidBodyMaterialCfg(
static_friction=0.5, dynamic_friction=1.5, restitution=1.5
)
physics_material_cfg.func("/World/Physics/rubberMaterial", physics_material_cfg)
# bind the visual material to the mesh prim
sim_utils.bind_visual_material("/World/Geometry/box", "/World/Looks/glassMaterial")
sim_utils.bind_physics_material("/World/Geometry/box", "/World/Physics/rubberMaterial")
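# note: the visual material is bound under the default ("") purpose and the
# physics material under the "physics" purpose, which is what the
# assertions below verify through UsdShade.MaterialBindingAPI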
# check the main material binding
material_binding_api = UsdShade.MaterialBindingAPI(object_prim)
# -- visual
material_direct_binding = material_binding_api.GetDirectBinding()
self.assertEqual(material_direct_binding.GetMaterialPath(), "/World/Looks/glassMaterial")
self.assertEqual(material_direct_binding.GetMaterialPurpose(), "")
# -- physics
material_direct_binding = material_binding_api.GetDirectBinding("physics")
self.assertEqual(material_direct_binding.GetMaterialPath(), "/World/Physics/rubberMaterial")
self.assertEqual(material_direct_binding.GetMaterialPurpose(), "physics")
if __name__ == "__main__":
run_tests()
| 8,039 |
Python
| 45.473988 | 119 | 0.672969 |