Dataset schema (column: type, observed range):
file_path: string, length 20 to 207
content: string, length 5 to 3.85M
size: int64, 5 to 3.85M
lang: string, 9 distinct classes
avg_line_length: float64, 1.33 to 100
max_line_length: int64, 4 to 993
alphanum_fraction: float64, 0.26 to 0.93
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_engines/lazy_search.h
#ifndef SEARCH_ENGINES_LAZY_SEARCH_H #define SEARCH_ENGINES_LAZY_SEARCH_H #include "../evaluation_context.h" #include "../evaluator.h" #include "../open_list.h" #include "../operator_id.h" #include "../search_engine.h" #include "../search_progress.h" #include "../search_space.h" #include "../utils/rng.h" #include <memory> #include <vector> namespace options { class Options; } namespace lazy_search { class LazySearch : public SearchEngine { protected: std::unique_ptr<EdgeOpenList> open_list; // Search behavior parameters bool reopen_closed_nodes; // whether to reopen closed nodes upon finding lower g paths bool randomize_successors; bool preferred_successors_first; std::shared_ptr<utils::RandomNumberGenerator> rng; std::vector<Evaluator *> path_dependent_evaluators; std::vector<std::shared_ptr<Evaluator>> preferred_operator_evaluators; State current_state; StateID current_predecessor_id; OperatorID current_operator_id; int current_g; int current_real_g; EvaluationContext current_eval_context; virtual void initialize() override; virtual SearchStatus step() override; void generate_successors(); SearchStatus fetch_next_state(); void reward_progress(); std::vector<OperatorID> get_successor_operators( const ordered_set::OrderedSet<OperatorID> &preferred_operators) const; public: explicit LazySearch(const options::Options &opts); virtual ~LazySearch() = default; void set_preferred_operator_evaluators(std::vector<std::shared_ptr<Evaluator>> &evaluators); virtual void print_statistics() const override; }; } #endif
1,651
C
24.8125
96
0.719564
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/operator_counting_heuristic.cc
#include "operator_counting_heuristic.h" #include "constraint_generator.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/markup.h" #include <cmath> using namespace std; namespace operator_counting { OperatorCountingHeuristic::OperatorCountingHeuristic(const Options &opts) : Heuristic(opts), constraint_generators( opts.get_list<shared_ptr<ConstraintGenerator>>("constraint_generators")), lp_solver(opts.get<lp::LPSolverType>("lpsolver")), use_integer_operator_counts(opts.get<bool>("use_integer_operator_counts")) { named_vector::NamedVector<lp::LPVariable> variables; double infinity = lp_solver.get_infinity(); for (OperatorProxy op : task_proxy.get_operators()) { int op_cost = op.get_cost(); variables.push_back(lp::LPVariable(0, infinity, op_cost, use_integer_operator_counts)); } named_vector::NamedVector<lp::LPConstraint> constraints; for (const auto &generator : constraint_generators) { generator->initialize_constraints(task, constraints, infinity); } lp_solver.load_problem(lp::LinearProgram(lp::LPObjectiveSense::MINIMIZE, move(variables), move(constraints))); } OperatorCountingHeuristic::~OperatorCountingHeuristic() { } int OperatorCountingHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); assert(!lp_solver.has_temporary_constraints()); for (const auto &generator : constraint_generators) { bool dead_end = generator->update_constraints(state, lp_solver); if (dead_end) { lp_solver.clear_temporary_constraints(); return DEAD_END; } } int result; lp_solver.solve(); if (lp_solver.has_optimal_solution()) { double epsilon = 0.01; double objective_value = lp_solver.get_objective_value(); result = ceil(objective_value - epsilon); } else { result = DEAD_END; } lp_solver.clear_temporary_constraints(); return result; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis( "Operator counting heuristic", "An operator counting heuristic computes a linear program (LP) in each " "state. The LP has one variable Count_o for each operator o that " "represents how often the operator is used in a plan. Operator " "counting constraints are linear constraints over these varaibles that " "are guaranteed to have a solution with Count_o = occurrences(o, pi) " "for every plan pi. Minimizing the total cost of operators subject to " "some operator counting constraints is an admissible heuristic. " "For details, see" + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert", "Blai Bonet"}, "LP-based Heuristics for Cost-optimal Planning", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7892/8031", "Proceedings of the Twenty-Fourth International Conference" " on Automated Planning and Scheduling (ICAPS 2014)", "226-234", "AAAI Press", "2014")); parser.document_language_support("action costs", "supported"); parser.document_language_support( "conditional effects", "not supported (the heuristic supports them in theory, but none of " "the currently implemented constraint generators do)"); parser.document_language_support( "axioms", "not supported (the heuristic supports them in theory, but none of " "the currently implemented constraint generators do)"); parser.document_property("admissible", "yes"); parser.document_property( "consistent", "yes, if all constraint generators represent consistent heuristics"); parser.document_property("safe", "yes"); // TODO: prefer operators that are non-zero in the solution. 
parser.document_property("preferred operators", "no"); parser.add_list_option<shared_ptr<ConstraintGenerator>>( "constraint_generators", "methods that generate constraints over operator counting variables"); parser.add_option<bool>( "use_integer_operator_counts", "restrict operator counting variables to integer values. Computing the " "heuristic with integer variables can produce higher values but " "requires solving a MIP instead of an LP which is generally more " "computationally expensive. Turning this option on can thus drastically " "increase the runtime.", "false"); lp::add_lp_solver_option_to_parser(parser); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.help_mode()) return nullptr; opts.verify_list_non_empty<shared_ptr<ConstraintGenerator>>( "constraint_generators"); if (parser.dry_run()) return nullptr; return make_shared<OperatorCountingHeuristic>(opts); } static Plugin<Evaluator> _plugin("operatorcounting", _parse); }
5,123
C++
39.666666
114
0.676557
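Note: compute_heuristic in the file above rounds the LP objective up to the next integer, with a small epsilon guarding against floating-point results that land just above an integer boundary. A minimal standalone C++ sketch of that rounding step (the epsilon value 0.01 is taken from the file above; the sample objective values are invented for illustration):

#include <cmath>
#include <iostream>

// Round an LP objective value up to an integer heuristic value. Without the
// epsilon, a solver result such as 5.0000001 would be rounded up to 6;
// subtracting epsilon first keeps it at the intended 5. Rounding up is safe
// for admissibility because, with integer operator costs, every plan cost is
// an integer no smaller than the LP optimum.
static int round_up_objective(double objective_value) {
    const double epsilon = 0.01;  // same guard value as in the heuristic above
    return static_cast<int>(std::ceil(objective_value - epsilon));
}

int main() {
    std::cout << round_up_objective(5.0000001) << '\n';  // prints 5
    std::cout << round_up_objective(4.32) << '\n';       // prints 5
    return 0;
}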
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/state_equation_constraints.cc
#include "state_equation_constraints.h" #include "../option_parser.h" #include "../plugin.h" #include "../lp/lp_solver.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include "../utils/markup.h" using namespace std; namespace operator_counting { void add_indices_to_constraint(lp::LPConstraint &constraint, const set<int> &indices, double coefficient) { for (int index : indices) { constraint.insert(index, coefficient); } } void StateEquationConstraints::build_propositions(const TaskProxy &task_proxy) { VariablesProxy vars = task_proxy.get_variables(); propositions.reserve(vars.size()); for (VariableProxy var : vars) { propositions.push_back(vector<Proposition>(var.get_domain_size())); } OperatorsProxy ops = task_proxy.get_operators(); for (size_t op_id = 0; op_id < ops.size(); ++op_id) { const OperatorProxy &op = ops[op_id]; vector<int> precondition(vars.size(), -1); for (FactProxy condition : op.get_preconditions()) { int pre_var_id = condition.get_variable().get_id(); precondition[pre_var_id] = condition.get_value(); } for (EffectProxy effect_proxy : op.get_effects()) { FactProxy effect = effect_proxy.get_fact(); int var = effect.get_variable().get_id(); int pre = precondition[var]; int post = effect.get_value(); assert(post != -1); assert(pre != post); if (pre != -1) { propositions[var][post].always_produced_by.insert(op_id); propositions[var][pre].always_consumed_by.insert(op_id); } else { propositions[var][post].sometimes_produced_by.insert(op_id); } } } } void StateEquationConstraints::add_constraints( named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity) { for (vector<Proposition> &var_propositions : propositions) { for (Proposition &prop : var_propositions) { lp::LPConstraint constraint(-infinity, infinity); add_indices_to_constraint(constraint, prop.always_produced_by, 1.0); add_indices_to_constraint(constraint, prop.sometimes_produced_by, 1.0); add_indices_to_constraint(constraint, prop.always_consumed_by, -1.0); if (!constraint.empty()) { prop.constraint_index = constraints.size(); constraints.push_back(constraint); } } } } void StateEquationConstraints::initialize_constraints( const shared_ptr<AbstractTask> &task, named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity) { utils::g_log << "Initializing constraints from state equation." << endl; TaskProxy task_proxy(*task); task_properties::verify_no_axioms(task_proxy); task_properties::verify_no_conditional_effects(task_proxy); build_propositions(task_proxy); add_constraints(constraints, infinity); // Initialize goal state. VariablesProxy variables = task_proxy.get_variables(); goal_state = vector<int>(variables.size(), numeric_limits<int>::max()); for (FactProxy goal : task_proxy.get_goals()) { goal_state[goal.get_variable().get_id()] = goal.get_value(); } } bool StateEquationConstraints::update_constraints(const State &state, lp::LPSolver &lp_solver) { // Compute the bounds for the rows in the LP. for (size_t var = 0; var < propositions.size(); ++var) { int num_values = propositions[var].size(); for (int value = 0; value < num_values; ++value) { const Proposition &prop = propositions[var][value]; // Set row bounds. if (prop.constraint_index >= 0) { double lower_bound = 0; /* If we consider the current value of var, there must be an additional consumer. */ if (state[var].get_value() == value) { --lower_bound; } /* If we consider the goal value of var, there must be an additional producer. 
*/ if (goal_state[var] == value) { ++lower_bound; } lp_solver.set_constraint_lower_bound( prop.constraint_index, lower_bound); } } } return false; } static shared_ptr<ConstraintGenerator> _parse(OptionParser &parser) { parser.document_synopsis( "State equation constraints", "For each fact, a permanent constraint is added that considers the net " "change of the fact, i.e., the total number of times the fact is added " "minus the total number of times is removed. The bounds of each " "constraint depend on the current state and the goal state and are " "updated in each state. For details, see" + utils::format_conference_reference( {"Menkes van den Briel", "J. Benton", "Subbarao Kambhampati", "Thomas Vossen"}, "An LP-based heuristic for optimal planning", "http://link.springer.com/chapter/10.1007/978-3-540-74970-7_46", "Proceedings of the Thirteenth International Conference on" " Principles and Practice of Constraint Programming (CP 2007)", "651-665", "Springer-Verlag", "2007") + utils::format_conference_reference( {"Blai Bonet"}, "An admissible heuristic for SAS+ planning obtained from the" " state equation", "http://ijcai.org/papers13/Papers/IJCAI13-335.pdf", "Proceedings of the Twenty-Third International Joint" " Conference on Artificial Intelligence (IJCAI 2013)", "2268-2274", "AAAI Press", "2013") + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert", "Blai Bonet"}, "LP-based Heuristics for Cost-optimal Planning", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7892/8031", "Proceedings of the Twenty-Fourth International Conference" " on Automated Planning and Scheduling (ICAPS 2014)", "226-234", "AAAI Press", "2014")); if (parser.dry_run()) return nullptr; return make_shared<StateEquationConstraints>(); } static Plugin<ConstraintGenerator> _plugin("state_equation_constraints", _parse); }
6,659
C++
40.886792
99
0.595585
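Note: as a small worked example of the bounds computed in update_constraints above, for a fact Var = Val the permanent constraint has the form sum over always_produced_by (coefficient +1) plus sum over sometimes_produced_by (+1) minus sum over always_consumed_by (-1) >= lower_bound. The lower bound starts at 0, is decreased by 1 if the fact holds in the current state (it may be consumed once more often than it is produced) and increased by 1 if the goal requires it (it must be produced once more often than it is consumed). A fact that both holds in the current state and appears in the goal therefore keeps a lower bound of 0.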
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/constraint_generator.cc
#include "constraint_generator.h" #include "../plugin.h" using namespace std; namespace operator_counting { void ConstraintGenerator::initialize_constraints( const shared_ptr<AbstractTask> &, named_vector::NamedVector<lp::LPConstraint> &, double) { } static PluginTypePlugin<ConstraintGenerator> _type_plugin( "ConstraintGenerator", // TODO: Replace empty string by synopsis for the wiki page. ""); }
421
C++
23.823528
94
0.738717
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/constraint_generator.h
#ifndef OPERATOR_COUNTING_CONSTRAINT_GENERATOR_H #define OPERATOR_COUNTING_CONSTRAINT_GENERATOR_H #include <memory> #include <vector> #include "../algorithms/named_vector.h" class AbstractTask; class State; namespace lp { class LPConstraint; class LPSolver; } namespace operator_counting { /* Derive from this class to add new operator-counting constraints. We support two types of constraints: - *Permanent constraints* are created once for the planning task and then reused for all states that are evaluated. It is possible (and usually necessary) to update the bounds of the constraint for every given state, but not the coefficients. Example: flow constraints such as move_ab + move_ac - move_ba - move_ca <= X, where X depends on the value of "at_a" in the current state and goal. - *Temporary constraints* are added for a given state and then removed. Example: constraints from landmarks generated for a given state, e.g. using the LM-Cut method. */ class ConstraintGenerator { public: virtual ~ConstraintGenerator() = default; /* Called upon initialization for the given task. Use this to add permanent constraints and perform other initialization. The parameter "infinity" is the value that the LP solver uses for infinity. Use it for constraint and variable bounds. */ virtual void initialize_constraints( const std::shared_ptr<AbstractTask> &task, named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity); /* Called before evaluating a state. Use this to add temporary constraints and to set bounds on permanent constraints for this state. All temporary constraints are removed automatically after the evalution. Returns true if a dead end was detected and false otherwise. */ virtual bool update_constraints(const State &state, lp::LPSolver &lp_solver) = 0; }; } #endif
2,006
C
32.449999
78
0.706879
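Note: to make the two constraint types concrete, here is a purely illustrative sketch of a ConstraintGenerator subclass. The class name ExampleConstraints and the constraints it adds are hypothetical and not part of the repository; only interface calls that appear in this header and in the .cc files above are used, and the include paths are assumed to match the layout shown.

#include "constraint_generator.h"

#include "../lp/lp_solver.h"
#include "../task_proxy.h"

#include <vector>

namespace operator_counting {
class ExampleConstraints : public ConstraintGenerator {
    int permanent_constraint_index = -1;
public:
    virtual void initialize_constraints(
        const std::shared_ptr<AbstractTask> &task,
        named_vector::NamedVector<lp::LPConstraint> &constraints,
        double infinity) override {
        // Permanent constraint: sum over all operators of Count_o >= 0.
        // Its bounds may change per state, its coefficients may not.
        TaskProxy task_proxy(*task);
        lp::LPConstraint constraint(0, infinity);
        for (OperatorProxy op : task_proxy.get_operators())
            constraint.insert(op.get_id(), 1.0);
        permanent_constraint_index = constraints.size();
        constraints.push_back(constraint);
    }

    virtual bool update_constraints(
        const State &, lp::LPSolver &lp_solver) override {
        // Adjust the bound of the permanent constraint for this state ...
        lp_solver.set_constraint_lower_bound(permanent_constraint_index, 0);
        // ... and add a temporary constraint (here Count_0 >= 1) that is
        // removed automatically after the state has been evaluated.
        std::vector<lp::LPConstraint> temporary;
        temporary.emplace_back(1.0, lp_solver.get_infinity());
        temporary.back().insert(0, 1.0);
        lp_solver.add_temporary_constraints(temporary);
        return false;  // no dead end detected
    }
};
}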
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/lm_cut_constraints.cc
#include "lm_cut_constraints.h" #include "../option_parser.h" #include "../plugin.h" #include "../heuristics/lm_cut_landmarks.h" #include "../lp/lp_solver.h" #include "../utils/markup.h" #include "../utils/memory.h" #include <cassert> using namespace std; namespace operator_counting { void LMCutConstraints::initialize_constraints( const shared_ptr<AbstractTask> &task, named_vector::NamedVector<lp::LPConstraint> & /*constraints*/, double /*infinity*/) { TaskProxy task_proxy(*task); landmark_generator = utils::make_unique_ptr<lm_cut_heuristic::LandmarkCutLandmarks>(task_proxy); } bool LMCutConstraints::update_constraints(const State &state, lp::LPSolver &lp_solver) { assert(landmark_generator); vector<lp::LPConstraint> constraints; double infinity = lp_solver.get_infinity(); bool dead_end = landmark_generator->compute_landmarks( state, nullptr, [&](const vector<int> &op_ids, int /*cost*/) { constraints.emplace_back(1.0, infinity); lp::LPConstraint &landmark_constraint = constraints.back(); for (int op_id : op_ids) { landmark_constraint.insert(op_id, 1.0); } }); if (dead_end) { return true; } else { lp_solver.add_temporary_constraints(constraints); return false; } } static shared_ptr<ConstraintGenerator> _parse(OptionParser &parser) { parser.document_synopsis( "LM-cut landmark constraints", "Computes a set of landmarks in each state using the LM-cut method. " "For each landmark L the constraint sum_{o in L} Count_o >= 1 is added " "to the operator counting LP temporarily. After the heuristic value " "for the state is computed, all temporary constraints are removed " "again. For details, see" + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert", "Blai Bonet"}, "LP-based Heuristics for Cost-optimal Planning", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7892/8031", "Proceedings of the Twenty-Fourth International Conference" " on Automated Planning and Scheduling (ICAPS 2014)", "226-234", "AAAI Press", "2014") + utils::format_conference_reference( {"Blai Bonet"}, "An admissible heuristic for SAS+ planning obtained from the" " state equation", "http://ijcai.org/papers13/Papers/IJCAI13-335.pdf", "Proceedings of the Twenty-Third International Joint" " Conference on Artificial Intelligence (IJCAI 2013)", "2268-2274", "AAAI Press", "2013")); if (parser.dry_run()) return nullptr; return make_shared<LMCutConstraints>(); } static Plugin<ConstraintGenerator> _plugin("lmcut_constraints", _parse); }
2,990
C++
34.188235
104
0.622408
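Note: for instance, if LM-cut reports the disjunctive action landmark {o3, o7} for the current state (operator indices illustrative), update_constraints above adds the temporary constraint Count_o3 + Count_o7 >= 1, built with coefficients 1.0, lower bound 1.0, and upper bound infinity; the constraint is dropped again as soon as the heuristic value for that state has been computed.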
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/lm_cut_constraints.h
#ifndef OPERATOR_COUNTING_LM_CUT_CONSTRAINTS_H #define OPERATOR_COUNTING_LM_CUT_CONSTRAINTS_H #include "constraint_generator.h" #include <memory> namespace lm_cut_heuristic { class LandmarkCutLandmarks; } namespace operator_counting { class LMCutConstraints : public ConstraintGenerator { std::unique_ptr<lm_cut_heuristic::LandmarkCutLandmarks> landmark_generator; public: virtual void initialize_constraints( const std::shared_ptr<AbstractTask> &task, named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity) override; virtual bool update_constraints(const State &state, lp::LPSolver &lp_solver) override; }; } #endif
714
C
26.499999
79
0.719888
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/pho_constraints.h
#ifndef OPERATOR_COUNTING_PHO_CONSTRAINTS_H #define OPERATOR_COUNTING_PHO_CONSTRAINTS_H #include "constraint_generator.h" #include "../algorithms/named_vector.h" #include "../pdbs/types.h" #include <memory> namespace options { class Options; } namespace pdbs { class PatternCollectionGenerator; } namespace operator_counting { class PhOConstraints : public ConstraintGenerator { std::shared_ptr<pdbs::PatternCollectionGenerator> pattern_generator; int constraint_offset; std::shared_ptr<pdbs::PDBCollection> pdbs; public: explicit PhOConstraints(const options::Options &opts); virtual void initialize_constraints( const std::shared_ptr<AbstractTask> &task, named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity) override; virtual bool update_constraints( const State &state, lp::LPSolver &lp_solver) override; }; } #endif
908
C
22.307692
72
0.737885
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/state_equation_constraints.h
#ifndef OPERATOR_COUNTING_STATE_EQUATION_CONSTRAINTS_H #define OPERATOR_COUNTING_STATE_EQUATION_CONSTRAINTS_H #include "constraint_generator.h" #include <set> class TaskProxy; namespace lp { class LPConstraint; } namespace operator_counting { /* A proposition is an atom of the form Var = Val. It stores the index of the constraint representing it in the LP */ struct Proposition { int constraint_index; std::set<int> always_produced_by; std::set<int> sometimes_produced_by; std::set<int> always_consumed_by; Proposition() : constraint_index(-1) { } ~Proposition() = default; }; class StateEquationConstraints : public ConstraintGenerator { std::vector<std::vector<Proposition>> propositions; // Map goal variables to their goal value and other variables to max int. std::vector<int> goal_state; void build_propositions(const TaskProxy &task_proxy); void add_constraints(named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity); public: virtual void initialize_constraints(const std::shared_ptr<AbstractTask> &task, named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity) override; virtual bool update_constraints(const State &state, lp::LPSolver &lp_solver) override; }; } #endif
1,366
C
30.068181
100
0.697657
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/operator_counting_heuristic.h
#ifndef OPERATOR_COUNTING_OPERATOR_COUNTING_HEURISTIC_H #define OPERATOR_COUNTING_OPERATOR_COUNTING_HEURISTIC_H #include "../heuristic.h" #include "../lp/lp_solver.h" #include <memory> #include <vector> namespace options { class Options; } namespace operator_counting { class ConstraintGenerator; class OperatorCountingHeuristic : public Heuristic { std::vector<std::shared_ptr<ConstraintGenerator>> constraint_generators; lp::LPSolver lp_solver; const bool use_integer_operator_counts; protected: virtual int compute_heuristic(const State &ancestor_state) override; public: explicit OperatorCountingHeuristic(const options::Options &opts); ~OperatorCountingHeuristic(); }; } #endif
714
C
22.064515
76
0.767507
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_counting/pho_constraints.cc
#include "pho_constraints.h" #include "../option_parser.h" #include "../plugin.h" #include "../lp/lp_solver.h" #include "../pdbs/pattern_database.h" #include "../pdbs/pattern_generator.h" #include "../utils/markup.h" #include <cassert> #include <limits> #include <memory> #include <vector> using namespace std; namespace operator_counting { PhOConstraints::PhOConstraints(const Options &opts) : pattern_generator( opts.get<shared_ptr<pdbs::PatternCollectionGenerator>>("patterns")) { } void PhOConstraints::initialize_constraints( const shared_ptr<AbstractTask> &task, named_vector::NamedVector<lp::LPConstraint> &constraints, double infinity) { assert(pattern_generator); pdbs::PatternCollectionInformation pattern_collection_info = pattern_generator->generate(task); /* TODO issue590: Currently initialize_constraints should only be called once. When we separate constraint generators from constraints, we can create pattern_generator locally and no longer need to explicitly reset it. */ pattern_generator = nullptr; pdbs = pattern_collection_info.get_pdbs(); TaskProxy task_proxy(*task); constraint_offset = constraints.size(); for (const shared_ptr<pdbs::PatternDatabase> &pdb : *pdbs) { constraints.emplace_back(0, infinity); lp::LPConstraint &constraint = constraints.back(); for (OperatorProxy op : task_proxy.get_operators()) { if (pdb->is_operator_relevant(op)) { constraint.insert(op.get_id(), op.get_cost()); } } } } bool PhOConstraints::update_constraints(const State &state, lp::LPSolver &lp_solver) { state.unpack(); for (size_t i = 0; i < pdbs->size(); ++i) { int constraint_id = constraint_offset + i; shared_ptr<pdbs::PatternDatabase> pdb = (*pdbs)[i]; int h = pdb->get_value(state.get_unpacked_values()); if (h == numeric_limits<int>::max()) { return true; } lp_solver.set_constraint_lower_bound(constraint_id, h); } return false; } static shared_ptr<ConstraintGenerator> _parse(OptionParser &parser) { parser.document_synopsis( "Posthoc optimization constraints", "The generator will compute a PDB for each pattern and add the" " constraint h(s) <= sum_{o in relevant(h)} Count_o. For details," " see" + utils::format_conference_reference( {"Florian Pommerening", "Gabriele Roeger", "Malte Helmert"}, "Getting the Most Out of Pattern Databases for Classical Planning", "http://ijcai.org/papers13/Papers/IJCAI13-347.pdf", "Proceedings of the Twenty-Third International Joint" " Conference on Artificial Intelligence (IJCAI 2013)", "2357-2364", "AAAI Press", "2013")); parser.add_option<shared_ptr<pdbs::PatternCollectionGenerator>>( "patterns", "pattern generation method", "systematic(2)"); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; return make_shared<PhOConstraints>(opts); } static Plugin<ConstraintGenerator> _plugin("pho_constraints", _parse); }
3,277
C++
32.448979
79
0.639609
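Note: concretely, for each pattern database the generator above adds a permanent constraint sum over the operators relevant to that PDB of cost(o) * Count_o >= 0, and update_constraints raises its lower bound to the PDB estimate h(s) in every evaluated state, so the counted operators must pay for at least h(s) using only operators relevant to the pattern. If any PDB reports an infinite estimate, the state is recognized as a dead end. (The synopsis string abbreviates this as h(s) <= sum_{o in relevant(h)} Count_o; the code weights each Count_o by the operator's cost.)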
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/delegating_task.cc
#include "delegating_task.h" using namespace std; namespace tasks { DelegatingTask::DelegatingTask(const shared_ptr<AbstractTask> &parent) : parent(parent) { } int DelegatingTask::get_num_variables() const { return parent->get_num_variables(); } string DelegatingTask::get_variable_name(int var) const { return parent->get_variable_name(var); } int DelegatingTask::get_variable_domain_size(int var) const { return parent->get_variable_domain_size(var); } int DelegatingTask::get_variable_axiom_layer(int var) const { return parent->get_variable_axiom_layer(var); } int DelegatingTask::get_variable_default_axiom_value(int var) const { return parent->get_variable_default_axiom_value(var); } string DelegatingTask::get_fact_name(const FactPair &fact) const { return parent->get_fact_name(fact); } bool DelegatingTask::are_facts_mutex(const FactPair &fact1, const FactPair &fact2) const { return parent->are_facts_mutex(fact1, fact2); } int DelegatingTask::get_operator_cost(int index, bool is_axiom) const { return parent->get_operator_cost(index, is_axiom); } string DelegatingTask::get_operator_name(int index, bool is_axiom) const { return parent->get_operator_name(index, is_axiom); } int DelegatingTask::get_num_operators() const { return parent->get_num_operators(); } int DelegatingTask::get_num_operator_preconditions(int index, bool is_axiom) const { return parent->get_num_operator_preconditions(index, is_axiom); } FactPair DelegatingTask::get_operator_precondition( int op_index, int fact_index, bool is_axiom) const { return parent->get_operator_precondition(op_index, fact_index, is_axiom); } int DelegatingTask::get_num_operator_effects(int op_index, bool is_axiom) const { return parent->get_num_operator_effects(op_index, is_axiom); } int DelegatingTask::get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const { return parent->get_num_operator_effect_conditions(op_index, eff_index, is_axiom); } FactPair DelegatingTask::get_operator_effect_condition( int op_index, int eff_index, int cond_index, bool is_axiom) const { return parent->get_operator_effect_condition(op_index, eff_index, cond_index, is_axiom); } FactPair DelegatingTask::get_operator_effect( int op_index, int eff_index, bool is_axiom) const { return parent->get_operator_effect(op_index, eff_index, is_axiom); } int DelegatingTask::convert_operator_index( int index, const AbstractTask *ancestor_task) const { if (ancestor_task == this) { return index; } int parent_index = convert_operator_index_to_parent(index); return parent->convert_operator_index(parent_index, ancestor_task); } int DelegatingTask::get_num_axioms() const { return parent->get_num_axioms(); } int DelegatingTask::get_num_goals() const { return parent->get_num_goals(); } FactPair DelegatingTask::get_goal_fact(int index) const { return parent->get_goal_fact(index); } vector<int> DelegatingTask::get_initial_state_values() const { return parent->get_initial_state_values(); } void DelegatingTask::convert_ancestor_state_values( vector<int> &values, const AbstractTask *ancestor_task) const { if (this == ancestor_task) { return; } parent->convert_ancestor_state_values(values, ancestor_task); convert_state_values_from_parent(values); } }
3,404
C++
29.401785
92
0.725911
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/modified_operator_costs_task.cc
#include "modified_operator_costs_task.h" #include <cassert> using namespace std; namespace extra_tasks { ModifiedOperatorCostsTask::ModifiedOperatorCostsTask( const shared_ptr<AbstractTask> &parent, vector<int> &&costs) : DelegatingTask(parent), operator_costs(move(costs)) { assert(static_cast<int>(operator_costs.size()) == get_num_operators()); } int ModifiedOperatorCostsTask::get_operator_cost(int index, bool is_axiom) const { // Don't change axiom costs. Usually they have cost 0, but we don't enforce this. if (is_axiom) return parent->get_operator_cost(index, is_axiom); return operator_costs[index]; } }
662
C++
26.624999
85
0.71148
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/domain_abstracted_task.cc
#include "domain_abstracted_task.h" #include "../utils/system.h" using namespace std; namespace extra_tasks { /* If we need the same functionality again in another task, we can move this to actract_task.h. We should then document that this method is only supposed to be used from within AbstractTasks. More high-level users should use has_conditional_effects(TaskProxy) from task_tools.h instead. */ static bool has_conditional_effects(const AbstractTask &task) { int num_ops = task.get_num_operators(); for (int op_index = 0; op_index < num_ops; ++op_index) { int num_effs = task.get_num_operator_effects(op_index, false); for (int eff_index = 0; eff_index < num_effs; ++eff_index) { int num_conditions = task.get_num_operator_effect_conditions( op_index, eff_index, false); if (num_conditions > 0) { return true; } } } return false; } DomainAbstractedTask::DomainAbstractedTask( const shared_ptr<AbstractTask> &parent, vector<int> &&domain_size, vector<int> &&initial_state_values, vector<FactPair> &&goals, vector<vector<string>> &&fact_names, vector<vector<int>> &&value_map) : DelegatingTask(parent), domain_size(move(domain_size)), initial_state_values(move(initial_state_values)), goals(move(goals)), fact_names(move(fact_names)), value_map(move(value_map)) { if (parent->get_num_axioms() > 0) { ABORT("DomainAbstractedTask doesn't support axioms."); } if (has_conditional_effects(*parent)) { ABORT("DomainAbstractedTask doesn't support conditional effects."); } } int DomainAbstractedTask::get_variable_domain_size(int var) const { return domain_size[var]; } string DomainAbstractedTask::get_fact_name(const FactPair &fact) const { return fact_names[fact.var][fact.value]; } bool DomainAbstractedTask::are_facts_mutex(const FactPair &, const FactPair &) const { ABORT("DomainAbstractedTask doesn't support querying mutexes."); } FactPair DomainAbstractedTask::get_operator_precondition( int op_index, int fact_index, bool is_axiom) const { return get_abstract_fact( parent->get_operator_precondition(op_index, fact_index, is_axiom)); } FactPair DomainAbstractedTask::get_operator_effect( int op_index, int eff_index, bool is_axiom) const { return get_abstract_fact( parent->get_operator_effect(op_index, eff_index, is_axiom)); } FactPair DomainAbstractedTask::get_goal_fact(int index) const { return get_abstract_fact(parent->get_goal_fact(index)); } vector<int> DomainAbstractedTask::get_initial_state_values() const { return initial_state_values; } void DomainAbstractedTask::convert_state_values_from_parent( vector<int> &values) const { int num_vars = domain_size.size(); for (int var = 0; var < num_vars; ++var) { int old_value = values[var]; int new_value = value_map[var][old_value]; values[var] = new_value; } } }
3,040
C++
32.054347
86
0.674671
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/root_task.h
#ifndef TASKS_ROOT_TASK_H #define TASKS_ROOT_TASK_H #include "../abstract_task.h" namespace tasks { extern std::shared_ptr<AbstractTask> g_root_task; extern void read_root_task(std::istream &in); } #endif
207
C
17.909089
49
0.7343
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/domain_abstracted_task_factory.cc
#include "domain_abstracted_task_factory.h" #include "domain_abstracted_task.h" #include "../task_utils/task_properties.h" #include "../utils/language.h" #include <sstream> #include <string> #include <unordered_set> using namespace std; namespace extra_tasks { class DomainAbstractedTaskFactory { private: vector<int> domain_size; vector<int> initial_state_values; vector<FactPair> goals; vector<vector<string>> fact_names; vector<vector<int>> value_map; shared_ptr<AbstractTask> task; void initialize(const AbstractTask &parent); void combine_values(int var, const ValueGroups &groups); string get_combined_fact_name(int var, const ValueGroup &values) const; public: DomainAbstractedTaskFactory( const shared_ptr<AbstractTask> &parent, const VarToGroups &value_groups); ~DomainAbstractedTaskFactory() = default; shared_ptr<AbstractTask> get_task() const; }; DomainAbstractedTaskFactory::DomainAbstractedTaskFactory( const shared_ptr<AbstractTask> &parent, const VarToGroups &value_groups) { TaskProxy parent_proxy(*parent); if (task_properties::has_axioms(parent_proxy)) { ABORT("DomainAbstractedTask doesn't support axioms."); } if (task_properties::has_conditional_effects(parent_proxy)) { ABORT("DomainAbstractedTask doesn't support conditional effects."); } initialize(*parent); for (const auto &pair : value_groups) { int var = pair.first; const ValueGroups &groups = pair.second; assert(utils::in_bounds(var, domain_size)); for (const ValueGroup &group : groups) { for (int value : group) { utils::unused_variable(value); assert(0 <= value && value < domain_size[var]); } } combine_values(var, groups); } // Apply domain abstraction to initial state. for (size_t var_id = 0; var_id < initial_state_values.size(); ++var_id) { initial_state_values[var_id] = value_map[var_id][initial_state_values[var_id]]; } // Apply domain abstraction to goals. for (FactPair &goal : goals) { goal.value = value_map[goal.var][goal.value]; } task = make_shared<DomainAbstractedTask>( parent, move(domain_size), move(initial_state_values), move(goals), move(fact_names), move(value_map)); } void DomainAbstractedTaskFactory::initialize(const AbstractTask &parent) { int num_vars = parent.get_num_variables(); domain_size.resize(num_vars); initial_state_values = parent.get_initial_state_values(); value_map.resize(num_vars); fact_names.resize(num_vars); for (int var = 0; var < num_vars; ++var) { int num_values = parent.get_variable_domain_size(var); domain_size[var] = num_values; value_map[var].resize(num_values); fact_names[var].resize(num_values); for (int value = 0; value < num_values; ++value) { value_map[var][value] = value; fact_names[var][value] = parent.get_fact_name(FactPair(var, value)); } } } string DomainAbstractedTaskFactory::get_combined_fact_name( int var, const ValueGroup &values) const { ostringstream name; string sep; for (int value : values) { name << sep << fact_names[var][value]; sep = " OR "; } return name.str(); } void DomainAbstractedTaskFactory::combine_values(int var, const ValueGroups &groups) { vector<string> combined_fact_names; unordered_set<int> groups_union; int num_merged_values = 0; for (const ValueGroup &group : groups) { combined_fact_names.push_back(get_combined_fact_name(var, group)); groups_union.insert(group.begin(), group.end()); num_merged_values += group.size(); } assert(static_cast<int>(groups_union.size()) == num_merged_values); int next_free_pos = 0; // Move all facts that are not part of groups to the front. 
for (int before = 0; before < domain_size[var]; ++before) { if (groups_union.count(before) == 0) { value_map[var][before] = next_free_pos; fact_names[var][next_free_pos] = move(fact_names[var][before]); ++next_free_pos; } } int num_single_values = next_free_pos; assert(num_single_values + num_merged_values == domain_size[var]); // Add new facts for merged groups. for (size_t group_id = 0; group_id < groups.size(); ++group_id) { const ValueGroup &group = groups[group_id]; for (int before : group) { value_map[var][before] = next_free_pos; } assert(utils::in_bounds(next_free_pos, fact_names[var])); fact_names[var][next_free_pos] = move(combined_fact_names[group_id]); ++next_free_pos; } int new_domain_size = num_single_values + static_cast<int>(groups.size()); assert(next_free_pos == new_domain_size); // Update domain size. fact_names[var].resize(new_domain_size); domain_size[var] = new_domain_size; } shared_ptr<AbstractTask> DomainAbstractedTaskFactory::get_task() const { return task; } shared_ptr<AbstractTask> build_domain_abstracted_task( const shared_ptr<AbstractTask> &parent, const VarToGroups &value_groups) { return DomainAbstractedTaskFactory(parent, value_groups).get_task(); } }
5,345
C++
32.835443
87
0.644715
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/delegating_task.h
#ifndef TASKS_DELEGATING_TASK_H #define TASKS_DELEGATING_TASK_H #include "../abstract_task.h" #include <memory> #include <string> #include <utility> #include <vector> namespace tasks { /* Task transformation that delegates all calls to the corresponding methods of the parent task. You should inherit from this class instead of AbstractTask if you need specialized behavior for only some of the methods. */ class DelegatingTask : public AbstractTask { protected: const std::shared_ptr<AbstractTask> parent; public: explicit DelegatingTask(const std::shared_ptr<AbstractTask> &parent); virtual ~DelegatingTask() override = default; virtual int get_num_variables() const override; virtual std::string get_variable_name(int var) const override; virtual int get_variable_domain_size(int var) const override; virtual int get_variable_axiom_layer(int var) const override; virtual int get_variable_default_axiom_value(int var) const override; virtual std::string get_fact_name(const FactPair &fact) const override; virtual bool are_facts_mutex( const FactPair &fact1, const FactPair &fact2) const override; virtual int get_operator_cost(int index, bool is_axiom) const override; virtual std::string get_operator_name(int index, bool is_axiom) const override; virtual int get_num_operators() const override; virtual int get_num_operator_preconditions(int index, bool is_axiom) const override; virtual FactPair get_operator_precondition( int op_index, int fact_index, bool is_axiom) const override; virtual int get_num_operator_effects(int op_index, bool is_axiom) const override; virtual int get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const override; virtual FactPair get_operator_effect_condition( int op_index, int eff_index, int cond_index, bool is_axiom) const override; virtual FactPair get_operator_effect( int op_index, int eff_index, bool is_axiom) const override; virtual int convert_operator_index( int index, const AbstractTask *ancestor_task) const final override; virtual int convert_operator_index_to_parent(int index) const { return index; } virtual int get_num_axioms() const override; virtual int get_num_goals() const override; virtual FactPair get_goal_fact(int index) const override; virtual std::vector<int> get_initial_state_values() const override; virtual void convert_ancestor_state_values( std::vector<int> &values, const AbstractTask *ancestor_task) const final override; virtual void convert_state_values_from_parent(std::vector<int> &) const { } }; } #endif
2,720
C
39.014705
88
0.73125
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/domain_abstracted_task.h
#ifndef TASKS_DOMAIN_ABSTRACTED_TASK_H #define TASKS_DOMAIN_ABSTRACTED_TASK_H #include "delegating_task.h" #include "../utils/collections.h" #include <cassert> #include <string> #include <utility> #include <vector> namespace extra_tasks { /* Task transformation for performing domain abstraction. We recommend using the factory function in domain_abstracted_task_factory.h for creating DomainAbstractedTasks. */ class DomainAbstractedTask : public tasks::DelegatingTask { const std::vector<int> domain_size; const std::vector<int> initial_state_values; const std::vector<FactPair> goals; const std::vector<std::vector<std::string>> fact_names; const std::vector<std::vector<int>> value_map; int get_abstract_value(const FactPair &fact) const { assert(utils::in_bounds(fact.var, value_map)); assert(utils::in_bounds(fact.value, value_map[fact.var])); return value_map[fact.var][fact.value]; } FactPair get_abstract_fact(const FactPair &fact) const { return FactPair(fact.var, get_abstract_value(fact)); } public: DomainAbstractedTask( const std::shared_ptr<AbstractTask> &parent, std::vector<int> &&domain_size, std::vector<int> &&initial_state_values, std::vector<FactPair> &&goals, std::vector<std::vector<std::string>> &&fact_names, std::vector<std::vector<int>> &&value_map); virtual int get_variable_domain_size(int var) const override; virtual std::string get_fact_name(const FactPair &fact) const override; virtual bool are_facts_mutex( const FactPair &fact1, const FactPair &fact2) const override; virtual FactPair get_operator_precondition( int op_index, int fact_index, bool is_axiom) const override; virtual FactPair get_operator_effect( int op_index, int eff_index, bool is_axiom) const override; virtual FactPair get_goal_fact(int index) const override; virtual std::vector<int> get_initial_state_values() const override; virtual void convert_state_values_from_parent( std::vector<int> &values) const override; }; } #endif
2,138
C
31.907692
75
0.697381
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/modified_operator_costs_task.h
#ifndef TASKS_MODIFIED_OPERATOR_COSTS_TASK_H #define TASKS_MODIFIED_OPERATOR_COSTS_TASK_H #include "delegating_task.h" #include <vector> namespace extra_tasks { class ModifiedOperatorCostsTask : public tasks::DelegatingTask { const std::vector<int> operator_costs; public: ModifiedOperatorCostsTask( const std::shared_ptr<AbstractTask> &parent, std::vector<int> &&costs); virtual ~ModifiedOperatorCostsTask() override = default; virtual int get_operator_cost(int index, bool is_axiom) const override; }; } #endif
551
C
22.999999
75
0.735027
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/cost_adapted_task.h
#ifndef TASKS_COST_ADAPTED_TASK_H #define TASKS_COST_ADAPTED_TASK_H #include "delegating_task.h" #include "../operator_cost.h" namespace options { class Options; } namespace tasks { /* Task transformation that changes operator costs. If the parent task assigns costs 'c' to an operator, its adjusted costs, depending on the value of the cost_type option, are: NORMAL: c ONE: 1 PLUSONE: 1, if all operators have cost 1 in the parent task, else c + 1 Regardless of the cost_type value, axioms will always keep their original cost, which is 0 by default. */ class CostAdaptedTask : public DelegatingTask { const OperatorCost cost_type; const bool parent_is_unit_cost; public: CostAdaptedTask( const std::shared_ptr<AbstractTask> &parent, OperatorCost cost_type); virtual ~CostAdaptedTask() override = default; virtual int get_operator_cost(int index, bool is_axiom) const override; }; } #endif
962
C
23.692307
77
0.714137
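Note: a minimal sketch of the cost adjustment documented in the header comment above (hypothetical helper name; the planner itself uses get_adjusted_action_cost() from operator_cost.h, which is not shown here, and axioms keep their original cost regardless of the option):

enum class CostType {NORMAL, ONE, PLUSONE};

// Mirrors the table in the header comment: NORMAL keeps the parent cost,
// ONE makes every operator cost 1, PLUSONE keeps unit-cost tasks unchanged
// and otherwise adds 1 to every operator cost.
static int adjusted_cost(int c, CostType cost_type, bool parent_is_unit_cost) {
    switch (cost_type) {
    case CostType::NORMAL:
        return c;
    case CostType::ONE:
        return 1;
    case CostType::PLUSONE:
        return parent_is_unit_cost ? 1 : c + 1;
    }
    return c;  // unreachable, keeps compilers happy
}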
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/modified_goals_task.cc
#include "modified_goals_task.h" using namespace std; namespace extra_tasks { ModifiedGoalsTask::ModifiedGoalsTask( const shared_ptr<AbstractTask> &parent, vector<FactPair> &&goals) : DelegatingTask(parent), goals(move(goals)) { } int ModifiedGoalsTask::get_num_goals() const { return goals.size(); } FactPair ModifiedGoalsTask::get_goal_fact(int index) const { return goals[index]; } }
417
C++
18.904761
60
0.709832
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/root_task.cc
#include "root_task.h" #include "../option_parser.h" #include "../plugin.h" #include "../state_registry.h" #include "../utils/collections.h" #include "../utils/timer.h" #include <algorithm> #include <cassert> #include <memory> #include <set> #include <unordered_set> #include <vector> using namespace std; using utils::ExitCode; namespace tasks { static const int PRE_FILE_VERSION = 3; shared_ptr<AbstractTask> g_root_task = nullptr; struct ExplicitVariable { int domain_size; string name; vector<string> fact_names; int axiom_layer; int axiom_default_value; explicit ExplicitVariable(istream &in); }; struct ExplicitEffect { FactPair fact; vector<FactPair> conditions; ExplicitEffect(int var, int value, vector<FactPair> &&conditions); }; struct ExplicitOperator { vector<FactPair> preconditions; vector<ExplicitEffect> effects; int cost; string name; bool is_an_axiom; void read_pre_post(istream &in); ExplicitOperator(istream &in, bool is_an_axiom, bool use_metric); }; class RootTask : public AbstractTask { vector<ExplicitVariable> variables; // TODO: think about using hash sets here. vector<vector<set<FactPair>>> mutexes; vector<ExplicitOperator> operators; vector<ExplicitOperator> axioms; vector<int> initial_state_values; vector<FactPair> goals; const ExplicitVariable &get_variable(int var) const; const ExplicitEffect &get_effect(int op_id, int effect_id, bool is_axiom) const; const ExplicitOperator &get_operator_or_axiom(int index, bool is_axiom) const; public: explicit RootTask(istream &in); virtual int get_num_variables() const override; virtual string get_variable_name(int var) const override; virtual int get_variable_domain_size(int var) const override; virtual int get_variable_axiom_layer(int var) const override; virtual int get_variable_default_axiom_value(int var) const override; virtual string get_fact_name(const FactPair &fact) const override; virtual bool are_facts_mutex( const FactPair &fact1, const FactPair &fact2) const override; virtual int get_operator_cost(int index, bool is_axiom) const override; virtual string get_operator_name( int index, bool is_axiom) const override; virtual int get_num_operators() const override; virtual int get_num_operator_preconditions( int index, bool is_axiom) const override; virtual FactPair get_operator_precondition( int op_index, int fact_index, bool is_axiom) const override; virtual int get_num_operator_effects( int op_index, bool is_axiom) const override; virtual int get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const override; virtual FactPair get_operator_effect_condition( int op_index, int eff_index, int cond_index, bool is_axiom) const override; virtual FactPair get_operator_effect( int op_index, int eff_index, bool is_axiom) const override; virtual int convert_operator_index( int index, const AbstractTask *ancestor_task) const override; virtual int get_num_axioms() const override; virtual int get_num_goals() const override; virtual FactPair get_goal_fact(int index) const override; virtual vector<int> get_initial_state_values() const override; virtual void convert_ancestor_state_values( vector<int> &values, const AbstractTask *ancestor_task) const override; }; static void check_fact(const FactPair &fact, const vector<ExplicitVariable> &variables) { if (!utils::in_bounds(fact.var, variables)) { cerr << "Invalid variable id: " << fact.var << endl; utils::exit_with(ExitCode::SEARCH_INPUT_ERROR); } if (fact.value < 0 || fact.value >= variables[fact.var].domain_size) { cerr << "Invalid value for variable " << 
fact.var << ": " << fact.value << endl; utils::exit_with(ExitCode::SEARCH_INPUT_ERROR); } } static void check_facts(const vector<FactPair> &facts, const vector<ExplicitVariable> &variables) { for (FactPair fact : facts) { check_fact(fact, variables); } } static void check_facts(const ExplicitOperator &action, const vector<ExplicitVariable> &variables) { check_facts(action.preconditions, variables); for (const ExplicitEffect &eff : action.effects) { check_fact(eff.fact, variables); check_facts(eff.conditions, variables); } } void check_magic(istream &in, const string &magic) { string word; in >> word; if (word != magic) { cerr << "Failed to match magic word '" << magic << "'." << endl << "Got '" << word << "'." << endl; if (magic == "begin_version") { cerr << "Possible cause: you are running the planner " << "on a translator output file from " << endl << "an older version." << endl; } utils::exit_with(ExitCode::SEARCH_INPUT_ERROR); } } vector<FactPair> read_facts(istream &in) { int count; in >> count; vector<FactPair> conditions; conditions.reserve(count); for (int i = 0; i < count; ++i) { FactPair condition = FactPair::no_fact; in >> condition.var >> condition.value; conditions.push_back(condition); } return conditions; } ExplicitVariable::ExplicitVariable(istream &in) { check_magic(in, "begin_variable"); in >> name; in >> axiom_layer; in >> domain_size; in >> ws; fact_names.resize(domain_size); for (int i = 0; i < domain_size; ++i) getline(in, fact_names[i]); check_magic(in, "end_variable"); } ExplicitEffect::ExplicitEffect( int var, int value, vector<FactPair> &&conditions) : fact(var, value), conditions(move(conditions)) { } void ExplicitOperator::read_pre_post(istream &in) { vector<FactPair> conditions = read_facts(in); int var, value_pre, value_post; in >> var >> value_pre >> value_post; if (value_pre != -1) { preconditions.emplace_back(var, value_pre); } effects.emplace_back(var, value_post, move(conditions)); } ExplicitOperator::ExplicitOperator(istream &in, bool is_an_axiom, bool use_metric) : is_an_axiom(is_an_axiom) { if (!is_an_axiom) { check_magic(in, "begin_operator"); in >> ws; getline(in, name); preconditions = read_facts(in); int count; in >> count; effects.reserve(count); for (int i = 0; i < count; ++i) { read_pre_post(in); } int op_cost; in >> op_cost; cost = use_metric ? op_cost : 1; check_magic(in, "end_operator"); } else { name = "<axiom>"; cost = 0; check_magic(in, "begin_rule"); read_pre_post(in); check_magic(in, "end_rule"); } assert(cost >= 0); } void read_and_verify_version(istream &in) { int version; check_magic(in, "begin_version"); in >> version; check_magic(in, "end_version"); if (version != PRE_FILE_VERSION) { cerr << "Expected translator output file version " << PRE_FILE_VERSION << ", got " << version << "." << endl << "Exiting." 
<< endl; utils::exit_with(ExitCode::SEARCH_INPUT_ERROR); } } bool read_metric(istream &in) { bool use_metric; check_magic(in, "begin_metric"); in >> use_metric; check_magic(in, "end_metric"); return use_metric; } vector<ExplicitVariable> read_variables(istream &in) { int count; in >> count; vector<ExplicitVariable> variables; variables.reserve(count); for (int i = 0; i < count; ++i) { variables.emplace_back(in); } return variables; } vector<vector<set<FactPair>>> read_mutexes(istream &in, const vector<ExplicitVariable> &variables) { vector<vector<set<FactPair>>> inconsistent_facts(variables.size()); for (size_t i = 0; i < variables.size(); ++i) inconsistent_facts[i].resize(variables[i].domain_size); int num_mutex_groups; in >> num_mutex_groups; /* NOTE: Mutex groups can overlap, in which case the same mutex should not be represented multiple times. The current representation takes care of that automatically by using sets. If we ever change this representation, this is something to be aware of. */ for (int i = 0; i < num_mutex_groups; ++i) { check_magic(in, "begin_mutex_group"); int num_facts; in >> num_facts; vector<FactPair> invariant_group; invariant_group.reserve(num_facts); for (int j = 0; j < num_facts; ++j) { int var; int value; in >> var >> value; invariant_group.emplace_back(var, value); } check_magic(in, "end_mutex_group"); for (const FactPair &fact1 : invariant_group) { for (const FactPair &fact2 : invariant_group) { if (fact1.var != fact2.var) { /* The "different variable" test makes sure we don't mark a fact as mutex with itself (important for correctness) and don't include redundant mutexes (important to conserve memory). Note that the translator (at least with default settings) removes mutex groups that contain *only* redundant mutexes, but it can of course generate mutex groups which lead to *some* redundant mutexes, where some but not all facts talk about the same variable. */ inconsistent_facts[fact1.var][fact1.value].insert(fact2); } } } } return inconsistent_facts; } vector<FactPair> read_goal(istream &in) { check_magic(in, "begin_goal"); vector<FactPair> goals = read_facts(in); check_magic(in, "end_goal"); if (goals.empty()) { cerr << "Task has no goal condition!" << endl; utils::exit_with(ExitCode::SEARCH_INPUT_ERROR); } return goals; } vector<ExplicitOperator> read_actions( istream &in, bool is_axiom, bool use_metric, const vector<ExplicitVariable> &variables) { int count; in >> count; vector<ExplicitOperator> actions; actions.reserve(count); for (int i = 0; i < count; ++i) { actions.emplace_back(in, is_axiom, use_metric); check_facts(actions.back(), variables); } return actions; } RootTask::RootTask(istream &in) { read_and_verify_version(in); bool use_metric = read_metric(in); variables = read_variables(in); int num_variables = variables.size(); mutexes = read_mutexes(in, variables); initial_state_values.resize(num_variables); check_magic(in, "begin_state"); for (int i = 0; i < num_variables; ++i) { in >> initial_state_values[i]; } check_magic(in, "end_state"); for (int i = 0; i < num_variables; ++i) { variables[i].axiom_default_value = initial_state_values[i]; } goals = read_goal(in); check_facts(goals, variables); operators = read_actions(in, false, use_metric, variables); axioms = read_actions(in, true, use_metric, variables); /* TODO: We should be stricter here and verify that we have reached the end of "in". */ /* HACK: We use a TaskProxy to access g_axiom_evaluators here which assumes that this task is completely constructed. 
*/ AxiomEvaluator &axiom_evaluator = g_axiom_evaluators[TaskProxy(*this)]; axiom_evaluator.evaluate(initial_state_values); } const ExplicitVariable &RootTask::get_variable(int var) const { assert(utils::in_bounds(var, variables)); return variables[var]; } const ExplicitEffect &RootTask::get_effect( int op_id, int effect_id, bool is_axiom) const { const ExplicitOperator &op = get_operator_or_axiom(op_id, is_axiom); assert(utils::in_bounds(effect_id, op.effects)); return op.effects[effect_id]; } const ExplicitOperator &RootTask::get_operator_or_axiom( int index, bool is_axiom) const { if (is_axiom) { assert(utils::in_bounds(index, axioms)); return axioms[index]; } else { assert(utils::in_bounds(index, operators)); return operators[index]; } } int RootTask::get_num_variables() const { return variables.size(); } string RootTask::get_variable_name(int var) const { return get_variable(var).name; } int RootTask::get_variable_domain_size(int var) const { return get_variable(var).domain_size; } int RootTask::get_variable_axiom_layer(int var) const { return get_variable(var).axiom_layer; } int RootTask::get_variable_default_axiom_value(int var) const { return get_variable(var).axiom_default_value; } string RootTask::get_fact_name(const FactPair &fact) const { assert(utils::in_bounds(fact.value, get_variable(fact.var).fact_names)); return get_variable(fact.var).fact_names[fact.value]; } bool RootTask::are_facts_mutex(const FactPair &fact1, const FactPair &fact2) const { if (fact1.var == fact2.var) { // Same variable: mutex iff different value. return fact1.value != fact2.value; } assert(utils::in_bounds(fact1.var, mutexes)); assert(utils::in_bounds(fact1.value, mutexes[fact1.var])); return bool(mutexes[fact1.var][fact1.value].count(fact2)); } int RootTask::get_operator_cost(int index, bool is_axiom) const { return get_operator_or_axiom(index, is_axiom).cost; } string RootTask::get_operator_name(int index, bool is_axiom) const { return get_operator_or_axiom(index, is_axiom).name; } int RootTask::get_num_operators() const { return operators.size(); } int RootTask::get_num_operator_preconditions(int index, bool is_axiom) const { return get_operator_or_axiom(index, is_axiom).preconditions.size(); } FactPair RootTask::get_operator_precondition( int op_index, int fact_index, bool is_axiom) const { const ExplicitOperator &op = get_operator_or_axiom(op_index, is_axiom); assert(utils::in_bounds(fact_index, op.preconditions)); return op.preconditions[fact_index]; } int RootTask::get_num_operator_effects(int op_index, bool is_axiom) const { return get_operator_or_axiom(op_index, is_axiom).effects.size(); } int RootTask::get_num_operator_effect_conditions( int op_index, int eff_index, bool is_axiom) const { return get_effect(op_index, eff_index, is_axiom).conditions.size(); } FactPair RootTask::get_operator_effect_condition( int op_index, int eff_index, int cond_index, bool is_axiom) const { const ExplicitEffect &effect = get_effect(op_index, eff_index, is_axiom); assert(utils::in_bounds(cond_index, effect.conditions)); return effect.conditions[cond_index]; } FactPair RootTask::get_operator_effect( int op_index, int eff_index, bool is_axiom) const { return get_effect(op_index, eff_index, is_axiom).fact; } int RootTask::convert_operator_index( int index, const AbstractTask *ancestor_task) const { if (this != ancestor_task) { ABORT("Invalid operator ID conversion"); } return index; } int RootTask::get_num_axioms() const { return axioms.size(); } int RootTask::get_num_goals() const { return goals.size(); } FactPair 
RootTask::get_goal_fact(int index) const { assert(utils::in_bounds(index, goals)); return goals[index]; } vector<int> RootTask::get_initial_state_values() const { return initial_state_values; } void RootTask::convert_ancestor_state_values( vector<int> &, const AbstractTask *ancestor_task) const { if (this != ancestor_task) { ABORT("Invalid state conversion"); } } void read_root_task(istream &in) { assert(!g_root_task); g_root_task = make_shared<RootTask>(in); } static shared_ptr<AbstractTask> _parse(OptionParser &parser) { if (parser.dry_run()) return nullptr; else return g_root_task; } static Plugin<AbstractTask> _plugin("no_transform", _parse); }
16,097
C++
30.564706
100
0.644903
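Note: to make the parsing code above easier to follow, here is the rough shape of the translator output file it reads. The section order and magic words follow read_and_verify_version, read_metric, read_variables, read_mutexes and the RootTask constructor; the concrete variable names, facts, and numbers are invented for illustration, and the annotations on the right are not part of the format.

begin_version
3
end_version
begin_metric
1
end_metric
2                     number of variables
begin_variable
var0                  variable name
-1                    axiom layer (-1 means not a derived variable)
2                     domain size, followed by one fact name per value
Atom holding(a)
NegatedAtom holding(a)
end_variable
...                   second variable, same layout
0                     number of mutex groups
begin_state
0
1
end_state
begin_goal
1                     number of goal facts, then "var value" pairs
0 1
end_goal
1                     number of operators
begin_operator
pick-up a
1                     number of precondition facts, then "var value" pairs
1 0
1                     number of effects
0 0 1 0               per effect: condition count, var, pre (-1 = none), post
1                     operator cost (used only when the metric flag is 1)
end_operator
0                     number of axioms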
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/domain_abstracted_task_factory.h
#ifndef TASKS_DOMAIN_ABSTRACTED_TASK_FACTORY_H
#define TASKS_DOMAIN_ABSTRACTED_TASK_FACTORY_H

#include <memory>
#include <unordered_map>
#include <vector>

class AbstractTask;

namespace extra_tasks {
using ValueGroup = std::vector<int>;
using ValueGroups = std::vector<ValueGroup>;
using VarToGroups = std::unordered_map<int, ValueGroups>;

/*
  Factory for creating domain abstractions.
*/
std::shared_ptr<AbstractTask> build_domain_abstracted_task(
    const std::shared_ptr<AbstractTask> &parent,
    const VarToGroups &value_groups);
}

#endif
551
C
21.079999
59
0.758621
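A VarToGroups entry lists, per variable, the value groups that a domain abstraction collapses. As a rough illustration of the data the factory consumes, the sketch below maps every value of one variable onto a representative of its group; choosing the first group member as representative is an assumption made only for this example, not necessarily what build_domain_abstracted_task does internally.

// Sketch: interpret one variable's ValueGroups as an old-value -> abstract-value map.
#include <iostream>
#include <unordered_map>
#include <vector>

using ValueGroup = std::vector<int>;
using ValueGroups = std::vector<ValueGroup>;
using VarToGroups = std::unordered_map<int, ValueGroups>;

std::unordered_map<int, int> build_value_map(const ValueGroups &groups) {
    std::unordered_map<int, int> old_to_new;
    for (const ValueGroup &group : groups)
        for (int value : group)
            old_to_new[value] = group.front();  // representative: first member (assumption)
    return old_to_new;
}

int main() {
    VarToGroups value_groups = {{0, {{1, 2, 3}, {4, 5}}}};
    auto mapping = build_value_map(value_groups.at(0));
    std::cout << "value 3 maps to " << mapping.at(3) << '\n';  // prints 1
}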
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/cost_adapted_task.cc
#include "cost_adapted_task.h" #include "../operator_cost.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/task_properties.h" #include "../tasks/root_task.h" #include "../utils/system.h" #include <iostream> #include <memory> using namespace std; using utils::ExitCode; namespace tasks { CostAdaptedTask::CostAdaptedTask( const shared_ptr<AbstractTask> &parent, OperatorCost cost_type) : DelegatingTask(parent), cost_type(cost_type), parent_is_unit_cost(task_properties::is_unit_cost(TaskProxy(*parent))) { } int CostAdaptedTask::get_operator_cost(int index, bool is_axiom) const { OperatorProxy op(*parent, index, is_axiom); return get_adjusted_action_cost(op, cost_type, parent_is_unit_cost); } static shared_ptr<AbstractTask> _parse(OptionParser &parser) { parser.document_synopsis( "Cost-adapted task", "A cost-adapting transformation of the root task."); add_cost_type_option_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) { return nullptr; } else { OperatorCost cost_type = opts.get<OperatorCost>("cost_type"); return make_shared<CostAdaptedTask>(g_root_task, cost_type); } } static Plugin<AbstractTask> _plugin("adapt_costs", _parse); }
1,304
C++
26.187499
78
0.684049
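CostAdaptedTask delegates everything except operator costs, which it adjusts via get_adjusted_action_cost according to the chosen cost_type. The standalone sketch below spells out the usual NORMAL/ONE/PLUSONE convention; the PLUSONE rule shown (costs stay at 1 for unit-cost parent tasks) is the convention as recalled here and is an illustrative stand-in, not the repository's implementation in operator_cost.cc.

// Hedged sketch of the cost adjustment idea behind CostAdaptedTask.
#include <cassert>

enum class OperatorCost {NORMAL, ONE, PLUSONE};

int adjusted_cost(int cost, OperatorCost cost_type, bool parent_is_unit_cost) {
    switch (cost_type) {
    case OperatorCost::NORMAL:
        return cost;
    case OperatorCost::ONE:
        return 1;
    case OperatorCost::PLUSONE:
        // For unit-cost tasks, adding 1 everywhere would only rescale the
        // problem, so costs are assumed to stay at 1 in that case.
        return parent_is_unit_cost ? 1 : cost + 1;
    }
    return cost;
}

int main() {
    assert(adjusted_cost(5, OperatorCost::ONE, false) == 1);
    assert(adjusted_cost(5, OperatorCost::PLUSONE, false) == 6);
    assert(adjusted_cost(1, OperatorCost::PLUSONE, true) == 1);
}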
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/tasks/modified_goals_task.h
#ifndef TASKS_MODIFIED_GOALS_TASK_H
#define TASKS_MODIFIED_GOALS_TASK_H

#include "delegating_task.h"

#include <vector>

namespace extra_tasks {
class ModifiedGoalsTask : public tasks::DelegatingTask {
    const std::vector<FactPair> goals;

public:
    ModifiedGoalsTask(
        const std::shared_ptr<AbstractTask> &parent,
        std::vector<FactPair> &&goals);
    ~ModifiedGoalsTask() = default;

    virtual int get_num_goals() const override;
    virtual FactPair get_goal_fact(int index) const override;
};
}

#endif
527
C
20.999999
61
0.713472
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/ff_heuristic.cc
#include "ff_heuristic.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include <cassert> using namespace std; namespace ff_heuristic { // construction and destruction FFHeuristic::FFHeuristic(const Options &opts) : AdditiveHeuristic(opts), relaxed_plan(task_proxy.get_operators().size(), false) { utils::g_log << "Initializing FF heuristic..." << endl; } void FFHeuristic::mark_preferred_operators_and_relaxed_plan( const State &state, PropID goal_id) { Proposition *goal = get_proposition(goal_id); if (!goal->marked) { // Only consider each subgoal once. goal->marked = true; OpID op_id = goal->reached_by; if (op_id != NO_OP) { // We have not yet chained back to a start node. UnaryOperator *unary_op = get_operator(op_id); bool is_preferred = true; for (PropID precond : get_preconditions(op_id)) { mark_preferred_operators_and_relaxed_plan( state, precond); if (get_proposition(precond)->reached_by != NO_OP) { is_preferred = false; } } int operator_no = unary_op->operator_no; if (operator_no != -1) { // This is not an axiom. relaxed_plan[operator_no] = true; if (is_preferred) { OperatorProxy op = task_proxy.get_operators()[operator_no]; assert(task_properties::is_applicable(op, state)); set_preferred(op); } } } } } int FFHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); int h_add = compute_add_and_ff(state); if (h_add == DEAD_END) return h_add; // Collecting the relaxed plan also sets the preferred operators. for (PropID goal_id : goal_propositions) mark_preferred_operators_and_relaxed_plan(state, goal_id); int h_ff = 0; for (size_t op_no = 0; op_no < relaxed_plan.size(); ++op_no) { if (relaxed_plan[op_no]) { relaxed_plan[op_no] = false; // Clean up for next computation. h_ff += task_proxy.get_operators()[op_no].get_cost(); } } return h_ff; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("FF heuristic", ""); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "supported"); parser.document_language_support( "axioms", "supported (in the sense that the planner won't complain -- " "handling of axioms might be very stupid " "and even render the heuristic unsafe)"); parser.document_property("admissible", "no"); parser.document_property("consistent", "no"); parser.document_property("safe", "yes for tasks without axioms"); parser.document_property("preferred operators", "yes"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<FFHeuristic>(opts); } static Plugin<Evaluator> _plugin("ff", _parse); }
3,321
C++
33.604166
79
0.60283
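After marking the relaxed plan, FFHeuristic::compute_heuristic sums the costs of the marked operators. The minimal sketch below reproduces just that final summation with made-up operator costs; it is not the planner's code, only an illustration of the formula h^FF = sum of costs of operators in the relaxed plan.

// Sketch of the h^FF summation over a marked relaxed plan (toy data).
#include <iostream>
#include <vector>

int main() {
    std::vector<bool> relaxed_plan = {true, false, true, true};
    std::vector<int> op_costs = {2, 7, 1, 3};
    int h_ff = 0;
    for (size_t op_no = 0; op_no < relaxed_plan.size(); ++op_no)
        if (relaxed_plan[op_no])
            h_ff += op_costs[op_no];
    std::cout << "h^FF = " << h_ff << '\n';  // 2 + 1 + 3 = 6
}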
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/lm_cut_landmarks.cc
#include "lm_cut_landmarks.h" #include "../task_utils/task_properties.h" #include <algorithm> #include <limits> #include <utility> using namespace std; namespace lm_cut_heuristic { // construction and destruction LandmarkCutLandmarks::LandmarkCutLandmarks(const TaskProxy &task_proxy) { task_properties::verify_no_axioms(task_proxy); task_properties::verify_no_conditional_effects(task_proxy); // Build propositions. num_propositions = 2; // artificial goal and artificial precondition VariablesProxy variables = task_proxy.get_variables(); propositions.resize(variables.size()); for (FactProxy fact : variables.get_facts()) { int var_id = fact.get_variable().get_id(); propositions[var_id].push_back(RelaxedProposition()); ++num_propositions; } // Build relaxed operators for operators and axioms. for (OperatorProxy op : task_proxy.get_operators()) build_relaxed_operator(op); // Simplify relaxed operators. // simplify(); /* TODO: Put this back in and test if it makes sense, but only after trying out whether and how much the change to unary operators hurts. */ // Build artificial goal proposition and operator. vector<RelaxedProposition *> goal_op_pre, goal_op_eff; for (FactProxy goal : task_proxy.get_goals()) { goal_op_pre.push_back(get_proposition(goal)); } goal_op_eff.push_back(&artificial_goal); /* Use the invalid operator ID -1 so accessing the artificial operator will generate an error. */ add_relaxed_operator(move(goal_op_pre), move(goal_op_eff), -1, 0); // Cross-reference relaxed operators. for (RelaxedOperator &op : relaxed_operators) { for (RelaxedProposition *pre : op.preconditions) pre->precondition_of.push_back(&op); for (RelaxedProposition *eff : op.effects) eff->effect_of.push_back(&op); } } LandmarkCutLandmarks::~LandmarkCutLandmarks() { } void LandmarkCutLandmarks::build_relaxed_operator(const OperatorProxy &op) { vector<RelaxedProposition *> precondition; vector<RelaxedProposition *> effects; for (FactProxy pre : op.get_preconditions()) { precondition.push_back(get_proposition(pre)); } for (EffectProxy eff : op.get_effects()) { effects.push_back(get_proposition(eff.get_fact())); } add_relaxed_operator( move(precondition), move(effects), op.get_id(), op.get_cost()); } void LandmarkCutLandmarks::add_relaxed_operator( vector<RelaxedProposition *> &&precondition, vector<RelaxedProposition *> &&effects, int op_id, int base_cost) { RelaxedOperator relaxed_op( move(precondition), move(effects), op_id, base_cost); if (relaxed_op.preconditions.empty()) relaxed_op.preconditions.push_back(&artificial_precondition); relaxed_operators.push_back(relaxed_op); } RelaxedProposition *LandmarkCutLandmarks::get_proposition( const FactProxy &fact) { int var_id = fact.get_variable().get_id(); int val = fact.get_value(); return &propositions[var_id][val]; } // heuristic computation void LandmarkCutLandmarks::setup_exploration_queue() { priority_queue.clear(); for (auto &var_props : propositions) { for (RelaxedProposition &prop : var_props) { prop.status = UNREACHED; } } artificial_goal.status = UNREACHED; artificial_precondition.status = UNREACHED; for (RelaxedOperator &op : relaxed_operators) { op.unsatisfied_preconditions = op.preconditions.size(); op.h_max_supporter = 0; op.h_max_supporter_cost = numeric_limits<int>::max(); } } void LandmarkCutLandmarks::setup_exploration_queue_state(const State &state) { for (FactProxy init_fact : state) { enqueue_if_necessary(get_proposition(init_fact), 0); } enqueue_if_necessary(&artificial_precondition, 0); } void LandmarkCutLandmarks::first_exploration(const State 
&state) { assert(priority_queue.empty()); setup_exploration_queue(); setup_exploration_queue_state(state); while (!priority_queue.empty()) { pair<int, RelaxedProposition *> top_pair = priority_queue.pop(); int popped_cost = top_pair.first; RelaxedProposition *prop = top_pair.second; int prop_cost = prop->h_max_cost; assert(prop_cost <= popped_cost); if (prop_cost < popped_cost) continue; const vector<RelaxedOperator *> &triggered_operators = prop->precondition_of; for (RelaxedOperator *relaxed_op : triggered_operators) { --relaxed_op->unsatisfied_preconditions; assert(relaxed_op->unsatisfied_preconditions >= 0); if (relaxed_op->unsatisfied_preconditions == 0) { relaxed_op->h_max_supporter = prop; relaxed_op->h_max_supporter_cost = prop_cost; int target_cost = prop_cost + relaxed_op->cost; for (RelaxedProposition *effect : relaxed_op->effects) { enqueue_if_necessary(effect, target_cost); } } } } } void LandmarkCutLandmarks::first_exploration_incremental( vector<RelaxedOperator *> &cut) { assert(priority_queue.empty()); /* We pretend that this queue has had as many pushes already as we have propositions to avoid switching from bucket-based to heap-based too aggressively. This should prevent ever switching to heap-based in problems where action costs are at most 1. */ priority_queue.add_virtual_pushes(num_propositions); for (RelaxedOperator *relaxed_op : cut) { int cost = relaxed_op->h_max_supporter_cost + relaxed_op->cost; for (RelaxedProposition *effect : relaxed_op->effects) enqueue_if_necessary(effect, cost); } while (!priority_queue.empty()) { pair<int, RelaxedProposition *> top_pair = priority_queue.pop(); int popped_cost = top_pair.first; RelaxedProposition *prop = top_pair.second; int prop_cost = prop->h_max_cost; assert(prop_cost <= popped_cost); if (prop_cost < popped_cost) continue; const vector<RelaxedOperator *> &triggered_operators = prop->precondition_of; for (RelaxedOperator *relaxed_op : triggered_operators) { if (relaxed_op->h_max_supporter == prop) { int old_supp_cost = relaxed_op->h_max_supporter_cost; if (old_supp_cost > prop_cost) { relaxed_op->update_h_max_supporter(); int new_supp_cost = relaxed_op->h_max_supporter_cost; if (new_supp_cost != old_supp_cost) { // This operator has become cheaper. 
assert(new_supp_cost < old_supp_cost); int target_cost = new_supp_cost + relaxed_op->cost; for (RelaxedProposition *effect : relaxed_op->effects) enqueue_if_necessary(effect, target_cost); } } } } } } void LandmarkCutLandmarks::second_exploration( const State &state, vector<RelaxedProposition *> &second_exploration_queue, vector<RelaxedOperator *> &cut) { assert(second_exploration_queue.empty()); assert(cut.empty()); artificial_precondition.status = BEFORE_GOAL_ZONE; second_exploration_queue.push_back(&artificial_precondition); for (FactProxy init_fact : state) { RelaxedProposition *init_prop = get_proposition(init_fact); init_prop->status = BEFORE_GOAL_ZONE; second_exploration_queue.push_back(init_prop); } while (!second_exploration_queue.empty()) { RelaxedProposition *prop = second_exploration_queue.back(); second_exploration_queue.pop_back(); const vector<RelaxedOperator *> &triggered_operators = prop->precondition_of; for (RelaxedOperator *relaxed_op : triggered_operators) { if (relaxed_op->h_max_supporter == prop) { bool reached_goal_zone = false; for (RelaxedProposition *effect : relaxed_op->effects) { if (effect->status == GOAL_ZONE) { assert(relaxed_op->cost > 0); reached_goal_zone = true; cut.push_back(relaxed_op); break; } } if (!reached_goal_zone) { for (RelaxedProposition *effect : relaxed_op->effects) { if (effect->status != BEFORE_GOAL_ZONE) { assert(effect->status == REACHED); effect->status = BEFORE_GOAL_ZONE; second_exploration_queue.push_back(effect); } } } } } } } void LandmarkCutLandmarks::mark_goal_plateau(RelaxedProposition *subgoal) { // NOTE: subgoal can be null if we got here via recursion through // a zero-cost action that is relaxed unreachable. (This can only // happen in domains which have zero-cost actions to start with.) // For example, this happens in pegsol-strips #01. if (subgoal && subgoal->status != GOAL_ZONE) { subgoal->status = GOAL_ZONE; for (RelaxedOperator *achiever : subgoal->effect_of) if (achiever->cost == 0) mark_goal_plateau(achiever->h_max_supporter); } } void LandmarkCutLandmarks::validate_h_max() const { #ifndef NDEBUG // Using conditional compilation to avoid complaints about unused // variables when using NDEBUG. This whole code does nothing useful // when assertions are switched off anyway. for (const RelaxedOperator &op : relaxed_operators) { if (op.unsatisfied_preconditions) { bool reachable = true; for (RelaxedProposition *pre : op.preconditions) { if (pre->status == UNREACHED) { reachable = false; break; } } assert(!reachable); assert(!op.h_max_supporter); } else { assert(op.h_max_supporter); int h_max_cost = op.h_max_supporter_cost; assert(h_max_cost == op.h_max_supporter->h_max_cost); for (RelaxedProposition *pre : op.preconditions) { assert(pre->status != UNREACHED); assert(pre->h_max_cost <= h_max_cost); } } } #endif } bool LandmarkCutLandmarks::compute_landmarks( const State &state, CostCallback cost_callback, LandmarkCallback landmark_callback) { for (RelaxedOperator &op : relaxed_operators) { op.cost = op.base_cost; } // The following three variables could be declared inside the loop // ("second_exploration_queue" even inside second_exploration), // but having them here saves reallocations and hence provides a // measurable speed boost. 
vector<RelaxedOperator *> cut; Landmark landmark; vector<RelaxedProposition *> second_exploration_queue; first_exploration(state); // validate_h_max(); // too expensive to use even in regular debug mode if (artificial_goal.status == UNREACHED) return true; int num_iterations = 0; while (artificial_goal.h_max_cost != 0) { ++num_iterations; mark_goal_plateau(&artificial_goal); assert(cut.empty()); second_exploration(state, second_exploration_queue, cut); assert(!cut.empty()); int cut_cost = numeric_limits<int>::max(); for (RelaxedOperator *op : cut) cut_cost = min(cut_cost, op->cost); for (RelaxedOperator *op : cut) op->cost -= cut_cost; if (cost_callback) { cost_callback(cut_cost); } if (landmark_callback) { landmark.clear(); for (RelaxedOperator *op : cut) { landmark.push_back(op->original_op_id); } landmark_callback(landmark, cut_cost); } first_exploration_incremental(cut); // validate_h_max(); // too expensive to use even in regular debug mode cut.clear(); /* Note: This could perhaps be made more efficient, for example by using a round-dependent counter for GOAL_ZONE and BEFORE_GOAL_ZONE, or something based on total_cost, so that we don't need a per-round reinitialization. */ for (auto &var_props : propositions) { for (RelaxedProposition &prop : var_props) { if (prop.status == GOAL_ZONE || prop.status == BEFORE_GOAL_ZONE) prop.status = REACHED; } } artificial_goal.status = REACHED; artificial_precondition.status = REACHED; } return false; } }
13,011
C++
37.270588
80
0.607563
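Each LM-cut round in compute_landmarks above finds a cut, charges the cost of its cheapest operator as a landmark, and subtracts that cost from every operator in the cut before re-exploring. The self-contained sketch below mimics one round's bookkeeping with toy costs and a callback in the spirit of the CostCallback used by the heuristic; it is illustrative, not the repository's code.

// Sketch of one LM-cut round: landmark cost = min cost in the cut,
// subtracted from all cut operators and accumulated via a callback.
#include <algorithm>
#include <functional>
#include <iostream>
#include <limits>
#include <vector>

int main() {
    std::vector<int> cut_costs = {4, 2, 9};  // costs of operators in one cut (toy data)
    int total = 0;
    std::function<void(int)> cost_callback = [&total](int cut_cost) {
        total += cut_cost;
    };

    int cut_cost = std::numeric_limits<int>::max();
    for (int c : cut_costs)
        cut_cost = std::min(cut_cost, c);
    for (int &c : cut_costs)
        c -= cut_cost;
    cost_callback(cut_cost);

    std::cout << "landmark cost " << cut_cost
              << ", running h^LM-cut " << total << '\n';  // 2, 2
}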
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/cea_heuristic.h
#ifndef HEURISTICS_CEA_HEURISTIC_H #define HEURISTICS_CEA_HEURISTIC_H #include "domain_transition_graph.h" #include "../heuristic.h" #include "../algorithms/priority_queues.h" #include <vector> class State; namespace cea_heuristic { struct LocalProblem; struct LocalProblemNode; struct LocalTransition; class ContextEnhancedAdditiveHeuristic : public Heuristic { std::vector<std::unique_ptr<domain_transition_graph::DomainTransitionGraph>> transition_graphs; std::vector<LocalProblem *> local_problems; std::vector<std::vector<LocalProblem *>> local_problem_index; LocalProblem *goal_problem; LocalProblemNode *goal_node; int min_action_cost; priority_queues::AdaptiveQueue<LocalProblemNode *> node_queue; LocalProblem *get_local_problem(int var_no, int value); LocalProblem *build_problem_for_variable(int var_no) const; LocalProblem *build_problem_for_goal() const; int get_priority(LocalProblemNode *node) const; void initialize_heap(); void add_to_heap(LocalProblemNode *node); bool is_local_problem_set_up(const LocalProblem *problem) const; void set_up_local_problem(LocalProblem *problem, int base_priority, int start_value, const State &state); void try_to_fire_transition(LocalTransition *trans); void expand_node(LocalProblemNode *node); void expand_transition(LocalTransition *trans, const State &state); int compute_costs(const State &state); void mark_helpful_transitions( LocalProblem *problem, LocalProblemNode *node, const State &state); // Clears "reached_by" of visited nodes as a side effect to avoid // recursing to the same node again. protected: virtual int compute_heuristic(const State &ancestor_state) override; public: explicit ContextEnhancedAdditiveHeuristic(const options::Options &opts); ~ContextEnhancedAdditiveHeuristic(); virtual bool dead_ends_are_reliable() const override; }; } #endif
1,977
C
31.966666
99
0.734952
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/lm_cut_heuristic.cc
#include "lm_cut_heuristic.h" #include "lm_cut_landmarks.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_proxy.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include "../utils/memory.h" #include <iostream> using namespace std; namespace lm_cut_heuristic { LandmarkCutHeuristic::LandmarkCutHeuristic(const Options &opts) : Heuristic(opts), landmark_generator(utils::make_unique_ptr<LandmarkCutLandmarks>(task_proxy)) { utils::g_log << "Initializing landmark cut heuristic..." << endl; } LandmarkCutHeuristic::~LandmarkCutHeuristic() { } int LandmarkCutHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); int total_cost = 0; bool dead_end = landmark_generator->compute_landmarks( state, [&total_cost](int cut_cost) {total_cost += cut_cost;}, nullptr); if (dead_end) return DEAD_END; return total_cost; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("Landmark-cut heuristic", ""); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "not supported"); parser.document_language_support("axioms", "not supported"); parser.document_property("admissible", "yes"); parser.document_property("consistent", "no"); parser.document_property("safe", "yes"); parser.document_property("preferred operators", "no"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<LandmarkCutHeuristic>(opts); } static Plugin<Evaluator> _plugin("lmcut", _parse); }
1,784
C++
28.75
84
0.690022
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/blind_search_heuristic.cc
#include "blind_search_heuristic.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include <cstddef> #include <limits> #include <utility> using namespace std; namespace blind_search_heuristic { BlindSearchHeuristic::BlindSearchHeuristic(const Options &opts) : Heuristic(opts), min_operator_cost(task_properties::get_min_operator_cost(task_proxy)) { utils::g_log << "Initializing blind search heuristic..." << endl; } BlindSearchHeuristic::~BlindSearchHeuristic() { } int BlindSearchHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); if (task_properties::is_goal_state(task_proxy, state)) return 0; else return min_operator_cost; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("Blind heuristic", "Returns cost of cheapest action for " "non-goal states, " "0 for goal states"); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "supported"); parser.document_language_support("axioms", "supported"); parser.document_property("admissible", "yes"); parser.document_property("consistent", "yes"); parser.document_property("safe", "yes"); parser.document_property("preferred operators", "no"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<BlindSearchHeuristic>(opts); } static Plugin<Evaluator> _plugin("blind", _parse); }
1,761
C++
30.464285
77
0.670642
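The blind heuristic reduces to a single case distinction: 0 for goal states, otherwise the cheapest operator cost in the task. A minimal sketch of that rule with toy inputs (not the planner's code):

// Sketch of the blind heuristic's case distinction.
#include <cassert>

int blind_heuristic(bool is_goal_state, int min_operator_cost) {
    return is_goal_state ? 0 : min_operator_cost;
}

int main() {
    assert(blind_heuristic(true, 3) == 0);
    assert(blind_heuristic(false, 3) == 3);
}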
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/lm_cut_heuristic.h
#ifndef HEURISTICS_LM_CUT_HEURISTIC_H
#define HEURISTICS_LM_CUT_HEURISTIC_H

#include "../heuristic.h"

#include <memory>

namespace options {
class Options;
}

namespace lm_cut_heuristic {
class LandmarkCutLandmarks;

class LandmarkCutHeuristic : public Heuristic {
    std::unique_ptr<LandmarkCutLandmarks> landmark_generator;

    virtual int compute_heuristic(const State &ancestor_state) override;
public:
    explicit LandmarkCutHeuristic(const options::Options &opts);
    virtual ~LandmarkCutHeuristic() override;
};
}

#endif
535
C
19.615384
72
0.768224
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/relaxation_heuristic.cc
#include "relaxation_heuristic.h" #include "../task_utils/task_properties.h" #include "../utils/collections.h" #include "../utils/logging.h" #include "../utils/timer.h" #include <algorithm> #include <cassert> #include <cstddef> #include <unordered_map> #include <vector> using namespace std; namespace relaxation_heuristic { Proposition::Proposition() : cost(-1), reached_by(NO_OP), is_goal(false), marked(false), num_precondition_occurences(-1) { } UnaryOperator::UnaryOperator( int num_preconditions, array_pool::ArrayPoolIndex preconditions, PropID effect, int operator_no, int base_cost) : effect(effect), base_cost(base_cost), num_preconditions(num_preconditions), preconditions(preconditions), operator_no(operator_no) { } // construction and destruction RelaxationHeuristic::RelaxationHeuristic(const options::Options &opts) : Heuristic(opts) { // Build propositions. propositions.resize(task_properties::get_num_facts(task_proxy)); // Build proposition offsets. VariablesProxy variables = task_proxy.get_variables(); proposition_offsets.reserve(variables.size()); PropID offset = 0; for (VariableProxy var : variables) { proposition_offsets.push_back(offset); offset += var.get_domain_size(); } assert(offset == static_cast<int>(propositions.size())); // Build goal propositions. GoalsProxy goals = task_proxy.get_goals(); goal_propositions.reserve(goals.size()); for (FactProxy goal : goals) { PropID prop_id = get_prop_id(goal); propositions[prop_id].is_goal = true; goal_propositions.push_back(prop_id); } // Build unary operators for operators and axioms. unary_operators.reserve( task_properties::get_num_total_effects(task_proxy)); for (OperatorProxy op : task_proxy.get_operators()) build_unary_operators(op); for (OperatorProxy axiom : task_proxy.get_axioms()) build_unary_operators(axiom); // Simplify unary operators. utils::Timer simplify_timer; simplify(); utils::g_log << "time to simplify: " << simplify_timer << endl; // Cross-reference unary operators. vector<vector<OpID>> precondition_of_vectors(propositions.size()); int num_unary_ops = unary_operators.size(); for (OpID op_id = 0; op_id < num_unary_ops; ++op_id) { for (PropID precond : get_preconditions(op_id)) precondition_of_vectors[precond].push_back(op_id); } int num_propositions = propositions.size(); for (PropID prop_id = 0; prop_id < num_propositions; ++prop_id) { const auto &precondition_of_vec = precondition_of_vectors[prop_id]; propositions[prop_id].precondition_of = precondition_of_pool.append(precondition_of_vec); propositions[prop_id].num_precondition_occurences = precondition_of_vec.size(); } } bool RelaxationHeuristic::dead_ends_are_reliable() const { return !task_properties::has_axioms(task_proxy); } PropID RelaxationHeuristic::get_prop_id(int var, int value) const { return proposition_offsets[var] + value; } PropID RelaxationHeuristic::get_prop_id(const FactProxy &fact) const { return get_prop_id(fact.get_variable().get_id(), fact.get_value()); } const Proposition *RelaxationHeuristic::get_proposition( int var, int value) const { return &propositions[get_prop_id(var, value)]; } Proposition *RelaxationHeuristic::get_proposition(int var, int value) { return &propositions[get_prop_id(var, value)]; } Proposition *RelaxationHeuristic::get_proposition(const FactProxy &fact) { return get_proposition(fact.get_variable().get_id(), fact.get_value()); } void RelaxationHeuristic::build_unary_operators(const OperatorProxy &op) { int op_no = op.is_axiom() ? 
-1 : op.get_id(); int base_cost = op.get_cost(); vector<PropID> precondition_props; PreconditionsProxy preconditions = op.get_preconditions(); precondition_props.reserve(preconditions.size()); for (FactProxy precondition : preconditions) { precondition_props.push_back(get_prop_id(precondition)); } for (EffectProxy effect : op.get_effects()) { PropID effect_prop = get_prop_id(effect.get_fact()); EffectConditionsProxy eff_conds = effect.get_conditions(); precondition_props.reserve(preconditions.size() + eff_conds.size()); for (FactProxy eff_cond : eff_conds) { precondition_props.push_back(get_prop_id(eff_cond)); } // The sort-unique can eventually go away. See issue497. vector<PropID> preconditions_copy(precondition_props); utils::sort_unique(preconditions_copy); array_pool::ArrayPoolIndex precond_index = preconditions_pool.append(preconditions_copy); unary_operators.emplace_back( preconditions_copy.size(), precond_index, effect_prop, op_no, base_cost); precondition_props.erase(precondition_props.end() - eff_conds.size(), precondition_props.end()); } } void RelaxationHeuristic::simplify() { /* Remove dominated unary operators, including duplicates. Unary operators with more than MAX_PRECONDITIONS_TO_TEST preconditions are (mostly; see code comments below for details) ignored because we cannot handle them efficiently. This is obviously an inelegant solution. Apart from this restriction, operator o1 dominates operator o2 if: 1. eff(o1) = eff(o2), and 2. pre(o1) is a (not necessarily strict) subset of pre(o2), and 3. cost(o1) <= cost(o2), and either 4a. At least one of 2. and 3. is strict, or 4b. id(o1) < id(o2). (Here, "id" is the position in the unary_operators vector.) This defines a strict partial order. */ #ifndef NDEBUG int num_ops = unary_operators.size(); for (OpID op_id = 0; op_id < num_ops; ++op_id) assert(utils::is_sorted_unique(get_preconditions_vector(op_id))); #endif const int MAX_PRECONDITIONS_TO_TEST = 5; utils::g_log << "Simplifying " << unary_operators.size() << " unary operators..." << flush; /* First, we create a map that maps the preconditions and effect ("key") of each operator to its cost and index ("value"). If multiple operators have the same key, the one with lowest cost wins. If this still results in a tie, the one with lowest index wins. These rules can be tested with a lexicographical comparison of the value. Note that for operators sharing the same preconditions and effect, our dominance relationship above is actually a strict *total* order (order by cost, then by id). For each key present in the data, the map stores the dominating element in this total order. */ using Key = pair<vector<PropID>, PropID>; using Value = pair<int, OpID>; using Map = utils::HashMap<Key, Value>; Map unary_operator_index; unary_operator_index.reserve(unary_operators.size()); for (size_t op_no = 0; op_no < unary_operators.size(); ++op_no) { const UnaryOperator &op = unary_operators[op_no]; /* Note: we consider operators with more than MAX_PRECONDITIONS_TO_TEST preconditions here because we can still filter out "exact matches" for these, i.e., the first test in `is_dominated`. */ Key key(get_preconditions_vector(op_no), op.effect); Value value(op.base_cost, op_no); auto inserted = unary_operator_index.insert( make_pair(move(key), value)); if (!inserted.second) { // We already had an element with this key; check its cost. 
Map::iterator iter = inserted.first; Value old_value = iter->second; if (value < old_value) iter->second = value; } } /* `dominating_key` is conceptually a local variable of `is_dominated`. We declare it outside to reduce vector allocation overhead. */ Key dominating_key; /* is_dominated: test if a given operator is dominated by an operator in the map. */ auto is_dominated = [&](const UnaryOperator &op) { /* Check all possible subsets X of pre(op) to see if there is a dominating operator with preconditions X represented in the map. */ OpID op_id = get_op_id(op); int cost = op.base_cost; const vector<PropID> precondition = get_preconditions_vector(op_id); /* We handle the case X = pre(op) specially for efficiency and to ensure that an operator is not considered to be dominated by itself. From the discussion above that operators with the same precondition and effect are actually totally ordered, it is enough to test here whether looking up the key of op in the map results in an entry including op itself. */ if (unary_operator_index[make_pair(precondition, op.effect)].second != op_id) return true; /* We now handle all cases where X is a strict subset of pre(op). Our map lookup ensures conditions 1. and 2., and because X is a strict subset, we also have 4a (which means we don't need 4b). So it only remains to check 3 for all hits. */ if (op.num_preconditions > MAX_PRECONDITIONS_TO_TEST) { /* The runtime of the following code grows exponentially with the number of preconditions. */ return false; } vector<PropID> &dominating_precondition = dominating_key.first; dominating_key.second = op.effect; // We subtract "- 1" to generate all *strict* subsets of precondition. int powerset_size = (1 << precondition.size()) - 1; for (int mask = 0; mask < powerset_size; ++mask) { dominating_precondition.clear(); for (size_t i = 0; i < precondition.size(); ++i) if (mask & (1 << i)) dominating_precondition.push_back(precondition[i]); Map::iterator found = unary_operator_index.find(dominating_key); if (found != unary_operator_index.end()) { Value dominator_value = found->second; int dominator_cost = dominator_value.first; if (dominator_cost <= cost) return true; } } return false; }; unary_operators.erase( remove_if( unary_operators.begin(), unary_operators.end(), is_dominated), unary_operators.end()); utils::g_log << " done! [" << unary_operators.size() << " unary operators]" << endl; } }
11,058
C++
35.986622
104
0.626063
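The dominance pruning in simplify() enumerates all proper subsets of an operator's precondition set with a bitmask loop bounded by 2^n - 1, which is why operators with more than MAX_PRECONDITIONS_TO_TEST preconditions are skipped. The standalone sketch below shows exactly that enumeration on a small example precondition vector.

// Sketch of strict-subset enumeration via bitmasks, as used in simplify().
#include <iostream>
#include <vector>

int main() {
    std::vector<int> precondition = {7, 11, 13};  // example proposition IDs
    // Subtracting 1 excludes the full set, so only proper subsets are generated.
    int powerset_size = (1 << precondition.size()) - 1;
    for (int mask = 0; mask < powerset_size; ++mask) {
        std::cout << "{";
        for (size_t i = 0; i < precondition.size(); ++i)
            if (mask & (1 << i))
                std::cout << ' ' << precondition[i];
        std::cout << " }\n";
    }
}

For n = 3 preconditions this prints the 2^3 - 1 = 7 proper subsets, including the empty one.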
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/goal_count_heuristic.cc
#include "goal_count_heuristic.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/logging.h" #include <iostream> using namespace std; namespace goal_count_heuristic { GoalCountHeuristic::GoalCountHeuristic(const Options &opts) : Heuristic(opts) { utils::g_log << "Initializing goal count heuristic..." << endl; } int GoalCountHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); int unsatisfied_goal_count = 0; for (FactProxy goal : task_proxy.get_goals()) { const VariableProxy var = goal.get_variable(); if (state[var] != goal) { ++unsatisfied_goal_count; } } return unsatisfied_goal_count; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("Goal count heuristic", ""); parser.document_language_support("action costs", "ignored by design"); parser.document_language_support("conditional effects", "supported"); parser.document_language_support("axioms", "supported"); parser.document_property("admissible", "no"); parser.document_property("consistent", "no"); parser.document_property("safe", "yes"); parser.document_property("preferred operators", "no"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<GoalCountHeuristic>(opts); } static Plugin<Evaluator> _plugin("goalcount", _parse); }
1,539
C++
29.196078
74
0.681611
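The goal-count heuristic simply counts goal facts not satisfied in the current state. Below is a toy sketch of the same loop over a hypothetical state/goal encoding (plain vectors and pairs instead of the planner's State and FactProxy types):

// Sketch of the goalcount computation on toy data.
#include <iostream>
#include <utility>
#include <vector>

int main() {
    std::vector<int> state = {0, 2, 1};                            // value per variable
    std::vector<std::pair<int, int>> goals = {{0, 1}, {1, 2}, {2, 1}};  // (var, value)
    int unsatisfied_goal_count = 0;
    for (const auto &[var, value] : goals)
        if (state[var] != value)
            ++unsatisfied_goal_count;
    std::cout << "goalcount = " << unsatisfied_goal_count << '\n';  // 1
}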
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/additive_heuristic.h
#ifndef HEURISTICS_ADDITIVE_HEURISTIC_H #define HEURISTICS_ADDITIVE_HEURISTIC_H #include "relaxation_heuristic.h" #include "../algorithms/priority_queues.h" #include "../utils/collections.h" #include <cassert> class State; namespace additive_heuristic { using relaxation_heuristic::PropID; using relaxation_heuristic::OpID; using relaxation_heuristic::NO_OP; using relaxation_heuristic::Proposition; using relaxation_heuristic::UnaryOperator; class AdditiveHeuristic : public relaxation_heuristic::RelaxationHeuristic { /* Costs larger than MAX_COST_VALUE are clamped to max_value. The precise value (100M) is a bit of a hack, since other parts of the code don't reliably check against overflow as of this writing. With a value of 100M, we want to ensure that even weighted A* with a weight of 10 will have f values comfortably below the signed 32-bit int upper bound. */ static const int MAX_COST_VALUE = 100000000; priority_queues::AdaptiveQueue<PropID> queue; bool did_write_overflow_warning; void setup_exploration_queue(); void setup_exploration_queue_state(const State &state); void relaxed_exploration(); void mark_preferred_operators(const State &state, PropID goal_id); void enqueue_if_necessary(PropID prop_id, int cost, OpID op_id) { assert(cost >= 0); Proposition *prop = get_proposition(prop_id); if (prop->cost == -1 || prop->cost > cost) { prop->cost = cost; prop->reached_by = op_id; queue.push(cost, prop_id); } assert(prop->cost != -1 && prop->cost <= cost); } void increase_cost(int &cost, int amount) { assert(cost >= 0); assert(amount >= 0); cost += amount; if (cost > MAX_COST_VALUE) { write_overflow_warning(); cost = MAX_COST_VALUE; } } void write_overflow_warning(); protected: virtual int compute_heuristic(const State &ancestor_state) override; // Common part of h^add and h^ff computation. int compute_add_and_ff(const State &state); public: explicit AdditiveHeuristic(const options::Options &opts); /* TODO: The two methods below are temporarily needed for the CEGAR heuristic. In the long run it might be better to split the computation from the heuristic class. Then the CEGAR code could use the computation object instead of the heuristic. */ void compute_heuristic_for_cegar(const State &state); int get_cost_for_cegar(int var, int value) const { return get_proposition(var, value)->cost; } }; } #endif
2,653
C
30.223529
76
0.669431
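increase_cost in AdditiveHeuristic is a saturating addition: once a cost exceeds MAX_COST_VALUE it is clamped, which keeps f values of weighted A* safely below the signed 32-bit limit. A minimal standalone version of that guard (without the one-time warning the planner prints):

// Sketch of the saturating addition used by AdditiveHeuristic::increase_cost.
#include <cassert>

const int MAX_COST_VALUE = 100000000;

void increase_cost(int &cost, int amount) {
    assert(cost >= 0 && amount >= 0);
    cost += amount;
    if (cost > MAX_COST_VALUE)
        cost = MAX_COST_VALUE;  // the real code also emits a warning once
}

int main() {
    int cost = MAX_COST_VALUE - 5;
    increase_cost(cost, 100);
    assert(cost == MAX_COST_VALUE);
}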
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/goal_count_heuristic.h
#ifndef HEURISTICS_GOAL_COUNT_HEURISTIC_H
#define HEURISTICS_GOAL_COUNT_HEURISTIC_H

#include "../heuristic.h"

namespace goal_count_heuristic {
class GoalCountHeuristic : public Heuristic {
protected:
    virtual int compute_heuristic(const State &ancestor_state) override;
public:
    explicit GoalCountHeuristic(const options::Options &opts);
};
}

#endif
359
C
21.499999
72
0.774373
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/additive_heuristic.cc
#include "additive_heuristic.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include <cassert> #include <vector> using namespace std; namespace additive_heuristic { const int AdditiveHeuristic::MAX_COST_VALUE; // construction and destruction AdditiveHeuristic::AdditiveHeuristic(const Options &opts) : RelaxationHeuristic(opts), did_write_overflow_warning(false) { utils::g_log << "Initializing additive heuristic..." << endl; } void AdditiveHeuristic::write_overflow_warning() { if (!did_write_overflow_warning) { // TODO: Should have a planner-wide warning mechanism to handle // things like this. utils::g_log << "WARNING: overflow on h^add! Costs clamped to " << MAX_COST_VALUE << endl; cerr << "WARNING: overflow on h^add! Costs clamped to " << MAX_COST_VALUE << endl; did_write_overflow_warning = true; } } // heuristic computation void AdditiveHeuristic::setup_exploration_queue() { queue.clear(); for (Proposition &prop : propositions) { prop.cost = -1; prop.marked = false; } // Deal with operators and axioms without preconditions. for (UnaryOperator &op : unary_operators) { op.unsatisfied_preconditions = op.num_preconditions; op.cost = op.base_cost; // will be increased by precondition costs if (op.unsatisfied_preconditions == 0) enqueue_if_necessary(op.effect, op.base_cost, get_op_id(op)); } } void AdditiveHeuristic::setup_exploration_queue_state(const State &state) { for (FactProxy fact : state) { PropID init_prop = get_prop_id(fact); enqueue_if_necessary(init_prop, 0, NO_OP); } } void AdditiveHeuristic::relaxed_exploration() { int unsolved_goals = goal_propositions.size(); while (!queue.empty()) { pair<int, PropID> top_pair = queue.pop(); int distance = top_pair.first; PropID prop_id = top_pair.second; Proposition *prop = get_proposition(prop_id); int prop_cost = prop->cost; assert(prop_cost >= 0); assert(prop_cost <= distance); if (prop_cost < distance) continue; if (prop->is_goal && --unsolved_goals == 0) return; for (OpID op_id : precondition_of_pool.get_slice( prop->precondition_of, prop->num_precondition_occurences)) { UnaryOperator *unary_op = get_operator(op_id); increase_cost(unary_op->cost, prop_cost); --unary_op->unsatisfied_preconditions; assert(unary_op->unsatisfied_preconditions >= 0); if (unary_op->unsatisfied_preconditions == 0) enqueue_if_necessary(unary_op->effect, unary_op->cost, op_id); } } } void AdditiveHeuristic::mark_preferred_operators( const State &state, PropID goal_id) { Proposition *goal = get_proposition(goal_id); if (!goal->marked) { // Only consider each subgoal once. goal->marked = true; OpID op_id = goal->reached_by; if (op_id != NO_OP) { // We have not yet chained back to a start node. UnaryOperator *unary_op = get_operator(op_id); bool is_preferred = true; for (PropID precond : get_preconditions(op_id)) { mark_preferred_operators(state, precond); if (get_proposition(precond)->reached_by != NO_OP) { is_preferred = false; } } int operator_no = unary_op->operator_no; if (is_preferred && operator_no != -1) { // This is not an axiom. 
OperatorProxy op = task_proxy.get_operators()[operator_no]; assert(task_properties::is_applicable(op, state)); set_preferred(op); } } } } int AdditiveHeuristic::compute_add_and_ff(const State &state) { setup_exploration_queue(); setup_exploration_queue_state(state); relaxed_exploration(); int total_cost = 0; for (PropID goal_id : goal_propositions) { const Proposition *goal = get_proposition(goal_id); int goal_cost = goal->cost; if (goal_cost == -1) return DEAD_END; increase_cost(total_cost, goal_cost); } return total_cost; } int AdditiveHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); int h = compute_add_and_ff(state); if (h != DEAD_END) { for (PropID goal_id : goal_propositions) mark_preferred_operators(state, goal_id); } return h; } void AdditiveHeuristic::compute_heuristic_for_cegar(const State &state) { compute_heuristic(state); } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("Additive heuristic", ""); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "supported"); parser.document_language_support( "axioms", "supported (in the sense that the planner won't complain -- " "handling of axioms might be very stupid " "and even render the heuristic unsafe)"); parser.document_property("admissible", "no"); parser.document_property("consistent", "no"); parser.document_property("safe", "yes for tasks without axioms"); parser.document_property("preferred operators", "yes"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<AdditiveHeuristic>(opts); } static Plugin<Evaluator> _plugin("add", _parse); }
5,822
C++
33.455621
78
0.616111
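compute_add_and_ff finishes by summing the propagated costs of all goal propositions and declaring a dead end if any goal kept cost -1. The sketch below isolates that final accumulation with invented goal costs; the saturating increase_cost step is omitted for brevity.

// Sketch of the final h^add accumulation over goal proposition costs.
#include <iostream>
#include <vector>

const int DEAD_END = -1;

int sum_goal_costs(const std::vector<int> &goal_costs) {
    int total_cost = 0;
    for (int goal_cost : goal_costs) {
        if (goal_cost == -1)        // goal never reached in the relaxation
            return DEAD_END;
        total_cost += goal_cost;
    }
    return total_cost;
}

int main() {
    std::cout << sum_goal_costs({3, 0, 4}) << '\n';   // 7
    std::cout << sum_goal_costs({3, -1, 4}) << '\n';  // -1 (dead end)
}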
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/relaxation_heuristic.h
#ifndef HEURISTICS_RELAXATION_HEURISTIC_H #define HEURISTICS_RELAXATION_HEURISTIC_H #include "array_pool.h" #include "../heuristic.h" #include "../utils/collections.h" #include <cassert> #include <vector> class FactProxy; class OperatorProxy; namespace relaxation_heuristic { struct Proposition; struct UnaryOperator; using PropID = int; using OpID = int; const OpID NO_OP = -1; struct Proposition { Proposition(); int cost; // used for h^max cost or h^add cost // TODO: Make sure in constructor that reached_by does not overflow. OpID reached_by : 30; /* The following two variables are conceptually bools, but Visual C++ does not support packing ints and bools together in a bitfield. */ unsigned int is_goal : 1; unsigned int marked : 1; // used for preferred operators of h^add and h^FF int num_precondition_occurences; array_pool::ArrayPoolIndex precondition_of; }; static_assert(sizeof(Proposition) == 16, "Proposition has wrong size"); struct UnaryOperator { UnaryOperator(int num_preconditions, array_pool::ArrayPoolIndex preconditions, PropID effect, int operator_no, int base_cost); int cost; // Used for h^max cost or h^add cost; // includes operator cost (base_cost) int unsatisfied_preconditions; PropID effect; int base_cost; int num_preconditions; array_pool::ArrayPoolIndex preconditions; int operator_no; // -1 for axioms; index into the task's operators otherwise }; static_assert(sizeof(UnaryOperator) == 28, "UnaryOperator has wrong size"); class RelaxationHeuristic : public Heuristic { void build_unary_operators(const OperatorProxy &op); void simplify(); // proposition_offsets[var_no]: first PropID related to variable var_no std::vector<PropID> proposition_offsets; protected: std::vector<UnaryOperator> unary_operators; std::vector<Proposition> propositions; std::vector<PropID> goal_propositions; array_pool::ArrayPool preconditions_pool; array_pool::ArrayPool precondition_of_pool; array_pool::ArrayPoolSlice get_preconditions(OpID op_id) const { const UnaryOperator &op = unary_operators[op_id]; return preconditions_pool.get_slice(op.preconditions, op.num_preconditions); } // HACK! std::vector<PropID> get_preconditions_vector(OpID op_id) const { auto view = get_preconditions(op_id); return std::vector<PropID>(view.begin(), view.end()); } /* TODO: Some of these protected methods are only needed for the CEGAR hack in the additive heuristic and should eventually go away. */ PropID get_prop_id(const Proposition &prop) const { PropID prop_id = &prop - propositions.data(); assert(utils::in_bounds(prop_id, propositions)); return prop_id; } OpID get_op_id(const UnaryOperator &op) const { OpID op_id = &op - unary_operators.data(); assert(utils::in_bounds(op_id, unary_operators)); return op_id; } PropID get_prop_id(int var, int value) const; PropID get_prop_id(const FactProxy &fact) const; Proposition *get_proposition(PropID prop_id) { return &propositions[prop_id]; } UnaryOperator *get_operator(OpID op_id) { return &unary_operators[op_id]; } const Proposition *get_proposition(int var, int value) const; Proposition *get_proposition(int var, int value); Proposition *get_proposition(const FactProxy &fact); public: explicit RelaxationHeuristic(const options::Options &options); virtual bool dead_ends_are_reliable() const override; }; } #endif
3,688
C
29.741666
84
0.684111
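Proposition and UnaryOperator are deliberately packed (a 30-bit OpID plus two 1-bit flags sharing one word) and their sizes are pinned with static_assert. The simplified stand-in below shows the same packing idiom; it prints the size instead of asserting it, since the exact value depends on the ABI (8 bytes on mainstream compilers), and it is not the repository's struct.

// Sketch of the bitfield-packing idiom used for Proposition.
#include <iostream>

struct PackedProposition {
    int cost;
    int reached_by : 30;
    unsigned int is_goal : 1;
    unsigned int marked : 1;
};

int main() {
    PackedProposition p{-1, -1, 0u, 0u};
    std::cout << "sizeof(PackedProposition) = " << sizeof(PackedProposition)
              << '\n';  // 8 on mainstream ABIs
    return p.marked;
}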
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/max_heuristic.h
#ifndef HEURISTICS_MAX_HEURISTIC_H
#define HEURISTICS_MAX_HEURISTIC_H

#include "relaxation_heuristic.h"

#include "../algorithms/priority_queues.h"

#include <cassert>

namespace max_heuristic {
using relaxation_heuristic::PropID;
using relaxation_heuristic::OpID;

using relaxation_heuristic::Proposition;
using relaxation_heuristic::UnaryOperator;

class HSPMaxHeuristic : public relaxation_heuristic::RelaxationHeuristic {
    priority_queues::AdaptiveQueue<PropID> queue;

    void setup_exploration_queue();
    void setup_exploration_queue_state(const State &state);
    void relaxed_exploration();

    void enqueue_if_necessary(PropID prop_id, int cost) {
        assert(cost >= 0);
        Proposition *prop = get_proposition(prop_id);
        if (prop->cost == -1 || prop->cost > cost) {
            prop->cost = cost;
            queue.push(cost, prop_id);
        }
        assert(prop->cost != -1 && prop->cost <= cost);
    }
protected:
    virtual int compute_heuristic(const State &ancestor_state) override;
public:
    explicit HSPMaxHeuristic(const options::Options &opts);
};
}

#endif
1,106
C
25.999999
74
0.69349
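HSPMaxHeuristic shares the exploration machinery of RelaxationHeuristic but combines precondition costs with a maximum rather than a sum. The toy sketch below contrasts the two combination rules for a single operator; the numbers are invented and the functions are illustrative stand-ins, not the planner's propagation code.

// Sketch contrasting h^add-style and h^max-style cost combination.
#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    std::vector<int> precondition_costs = {2, 5, 3};
    int base_cost = 1;
    int h_add_style = std::accumulate(precondition_costs.begin(),
                                      precondition_costs.end(), 0) + base_cost;
    int h_max_style = *std::max_element(precondition_costs.begin(),
                                        precondition_costs.end()) + base_cost;
    std::cout << "h^add-style cost " << h_add_style    // 11
              << ", h^max-style cost " << h_max_style  // 6
              << '\n';
}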
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/hm_heuristic.h
#ifndef HEURISTICS_HM_HEURISTIC_H #define HEURISTICS_HM_HEURISTIC_H #include "../heuristic.h" #include <algorithm> #include <iostream> #include <map> #include <string> #include <vector> namespace options { class Options; } namespace hm_heuristic { /* Haslum's h^m heuristic family ("critical path heuristics"). This is a very slow implementation and should not be used for speed benchmarks. */ class HMHeuristic : public Heuristic { using Tuple = std::vector<FactPair>; // parameters const int m; const bool has_cond_effects; const Tuple goals; // h^m table std::map<Tuple, int> hm_table; bool was_updated; // auxiliary methods void init_hm_table(const Tuple &t); void update_hm_table(); int eval(const Tuple &t) const; int update_hm_entry(const Tuple &t, int val); void extend_tuple(const Tuple &t, const OperatorProxy &op); int check_tuple_in_tuple(const Tuple &tuple, const Tuple &big_tuple) const; int get_operator_pre_value(const OperatorProxy &op, int var) const; Tuple get_operator_pre(const OperatorProxy &op) const; Tuple get_operator_eff(const OperatorProxy &op) const; bool contradict_effect_of(const OperatorProxy &op, int var, int val) const; void generate_all_tuples(); void generate_all_tuples_aux(int var, int sz, const Tuple &base); void generate_all_partial_tuples(const Tuple &base_tuple, std::vector<Tuple> &res) const; void generate_all_partial_tuples_aux(const Tuple &base_tuple, const Tuple &t, int index, int sz, std::vector<Tuple> &res) const; void dump_table() const; protected: virtual int compute_heuristic(const State &ancestor_state) override; public: explicit HMHeuristic(const options::Options &opts); virtual bool dead_ends_are_reliable() const override; }; } #endif
1,910
C
25.915493
92
0.675393
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/blind_search_heuristic.h
#ifndef HEURISTICS_BLIND_SEARCH_HEURISTIC_H
#define HEURISTICS_BLIND_SEARCH_HEURISTIC_H

#include "../heuristic.h"

namespace blind_search_heuristic {
class BlindSearchHeuristic : public Heuristic {
    int min_operator_cost;
protected:
    virtual int compute_heuristic(const State &ancestor_state) override;
public:
    BlindSearchHeuristic(const options::Options &opts);
    ~BlindSearchHeuristic();
};
}

#endif
416
C
22.166665
72
0.764423
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/lm_cut_landmarks.h
#ifndef HEURISTICS_LM_CUT_LANDMARKS_H #define HEURISTICS_LM_CUT_LANDMARKS_H #include "../task_proxy.h" #include "../algorithms/priority_queues.h" #include <cassert> #include <functional> #include <memory> #include <vector> namespace lm_cut_heuristic { // TODO: Fix duplication with the other relaxation heuristics. struct RelaxedProposition; enum PropositionStatus { UNREACHED = 0, REACHED = 1, GOAL_ZONE = 2, BEFORE_GOAL_ZONE = 3 }; struct RelaxedOperator { int original_op_id; std::vector<RelaxedProposition *> preconditions; std::vector<RelaxedProposition *> effects; int base_cost; // 0 for axioms, 1 for regular operators int cost; int unsatisfied_preconditions; int h_max_supporter_cost; // h_max_cost of h_max_supporter RelaxedProposition *h_max_supporter; RelaxedOperator(std::vector<RelaxedProposition *> &&pre, std::vector<RelaxedProposition *> &&eff, int op_id, int base) : original_op_id(op_id), preconditions(pre), effects(eff), base_cost(base) { } inline void update_h_max_supporter(); }; struct RelaxedProposition { std::vector<RelaxedOperator *> precondition_of; std::vector<RelaxedOperator *> effect_of; PropositionStatus status; int h_max_cost; }; class LandmarkCutLandmarks { std::vector<RelaxedOperator> relaxed_operators; std::vector<std::vector<RelaxedProposition>> propositions; RelaxedProposition artificial_precondition; RelaxedProposition artificial_goal; int num_propositions; priority_queues::AdaptiveQueue<RelaxedProposition *> priority_queue; void build_relaxed_operator(const OperatorProxy &op); void add_relaxed_operator(std::vector<RelaxedProposition *> &&precondition, std::vector<RelaxedProposition *> &&effects, int op_id, int base_cost); RelaxedProposition *get_proposition(const FactProxy &fact); void setup_exploration_queue(); void setup_exploration_queue_state(const State &state); void first_exploration(const State &state); void first_exploration_incremental(std::vector<RelaxedOperator *> &cut); void second_exploration(const State &state, std::vector<RelaxedProposition *> &second_exploration_queue, std::vector<RelaxedOperator *> &cut); void enqueue_if_necessary(RelaxedProposition *prop, int cost) { assert(cost >= 0); if (prop->status == UNREACHED || prop->h_max_cost > cost) { prop->status = REACHED; prop->h_max_cost = cost; priority_queue.push(cost, prop); } } void mark_goal_plateau(RelaxedProposition *subgoal); void validate_h_max() const; public: using Landmark = std::vector<int>; using CostCallback = std::function<void (int)>; using LandmarkCallback = std::function<void (const Landmark &, int)>; LandmarkCutLandmarks(const TaskProxy &task_proxy); virtual ~LandmarkCutLandmarks(); /* Compute LM-cut landmarks for the given state. If cost_callback is not nullptr, it is called once with the cost of each discovered landmark. If landmark_callback is not nullptr, it is called with each discovered landmark (as a vector of operator indices) and its cost. This requires making a copy of the landmark, so cost_callback should be used if only the cost of the landmark is needed. Returns true iff state is detected as a dead end. */ bool compute_landmarks(const State &state, CostCallback cost_callback, LandmarkCallback landmark_callback); }; inline void RelaxedOperator::update_h_max_supporter() { assert(!unsatisfied_preconditions); for (size_t i = 0; i < preconditions.size(); ++i) if (preconditions[i]->h_max_cost > h_max_supporter->h_max_cost) h_max_supporter = preconditions[i]; h_max_supporter_cost = h_max_supporter->h_max_cost; } } #endif
4,032
C
33.177966
88
0.669147
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/cea_heuristic.cc
#include "cea_heuristic.h" #include "domain_transition_graph.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include <cassert> #include <limits> #include <vector> using namespace std; using namespace domain_transition_graph; /* Implementation notes: The main data structures are: - LocalProblem: a single "copy" of a domain transition graph, which is used to compute the costs of achieving all facts (v=d') for a fixed variable v starting from a fixed value d. So we can have at most |dom(v)| many local problems for any variable v. These are created lazily as needed. - LocalProblemNode: a single vertex in the domain transition graph represented by a LocalProblem. Knows what the successors in the graph are and keeps tracks of costs and helpful transitions for the node. - LocalTransition: a transition between two local problem nodes. Keeps track of how many unachieved preconditions there still are, what the cost of enabling the transition are and things like that. The following two design decisions might be worth revisiting: - Each local problem keeps its own copy of the graph itself (what is connected to what via which labels), even though this is not necessary. The "static" graph info and the "dynamic" info could be split, potentially saving quite a bit of memory. - The graph is encoded with reference cycles: each transition knows what its source node is, even though this is in a sense redundant (the source must be the node which holds the transition), and every node knows what its local problem is, which is similarly redundant (the local problem must be the one that holds this node). If we got rid of this, the main queue of the algorithm would need (LocalProblem *, LocalProblemNode *) pairs rather than straight node pointers, and the waiting lists would need to contain (LocalProblemNode *, LocalTransition *) pairs rather than straight transitions. So it's not clear if this would really save much, which is why we do not currently do it. */ namespace cea_heuristic { struct LocalTransition { LocalProblemNode *source; LocalProblemNode *target; const ValueTransitionLabel *label; int action_cost; int target_cost; int unreached_conditions; LocalTransition( LocalProblemNode *source_, LocalProblemNode *target_, const ValueTransitionLabel *label_, int action_cost_) : source(source_), target(target_), label(label_), action_cost(action_cost_), target_cost(-1), unreached_conditions(-1) { // target_cost and unreached_cost are initialized by // expand_transition. } ~LocalTransition() { } }; struct LocalProblemNode { // Attributes fixed during initialization. LocalProblem *owner; vector<LocalTransition> outgoing_transitions; // Dynamic attributes (modified during heuristic computation). int cost; bool expanded; vector<short> context; LocalTransition *reached_by; /* Before a node is expanded, reached_by is the "current best" transition leading to this node. After a node is expanded, the reached_by value of the parent is copied (unless the parent is the initial node), so that reached_by is the *first* transition on the optimal path to this node. This is useful for preferred operators. (The two attributes used to be separate, but this was a bit wasteful.) 
*/ vector<LocalTransition *> waiting_list; LocalProblemNode(LocalProblem *owner_, int context_size) : owner(owner_), cost(-1), expanded(false), context(context_size, -1), reached_by(0) { } ~LocalProblemNode() { } }; struct LocalProblem { int base_priority; vector<LocalProblemNode> nodes; vector<int> *context_variables; public: LocalProblem() : base_priority(-1) { } ~LocalProblem() { } }; LocalProblem *ContextEnhancedAdditiveHeuristic::get_local_problem( int var_no, int value) { LocalProblem * &table_entry = local_problem_index[var_no][value]; if (!table_entry) { table_entry = build_problem_for_variable(var_no); local_problems.push_back(table_entry); } return table_entry; } LocalProblem *ContextEnhancedAdditiveHeuristic::build_problem_for_variable( int var_no) const { LocalProblem *problem = new LocalProblem; DomainTransitionGraph *dtg = transition_graphs[var_no].get(); problem->context_variables = &dtg->local_to_global_child; int num_parents = problem->context_variables->size(); size_t num_values = task_proxy.get_variables()[var_no].get_domain_size(); problem->nodes.reserve(num_values); for (size_t value = 0; value < num_values; ++value) problem->nodes.push_back(LocalProblemNode(problem, num_parents)); // Compile the DTG arcs into LocalTransition objects. for (size_t value = 0; value < num_values; ++value) { LocalProblemNode &node = problem->nodes[value]; const ValueNode &dtg_node = dtg->nodes[value]; for (size_t i = 0; i < dtg_node.transitions.size(); ++i) { const ValueTransition &dtg_trans = dtg_node.transitions[i]; int target_value = dtg_trans.target->value; LocalProblemNode &target = problem->nodes[target_value]; for (const ValueTransitionLabel &label : dtg_trans.labels) { OperatorProxy op = label.is_axiom ? task_proxy.get_axioms()[label.op_id] : task_proxy.get_operators()[label.op_id]; LocalTransition trans(&node, &target, &label, op.get_cost()); node.outgoing_transitions.push_back(trans); } } } return problem; } LocalProblem *ContextEnhancedAdditiveHeuristic::build_problem_for_goal() const { LocalProblem *problem = new LocalProblem; GoalsProxy goals_proxy = task_proxy.get_goals(); problem->context_variables = new vector<int>; for (FactProxy goal : goals_proxy) problem->context_variables->push_back(goal.get_variable().get_id()); for (size_t value = 0; value < 2; ++value) problem->nodes.push_back(LocalProblemNode(problem, goals_proxy.size())); vector<LocalAssignment> goals; for (size_t goal_no = 0; goal_no < goals_proxy.size(); ++goal_no) { int goal_value = goals_proxy[goal_no].get_value(); goals.push_back(LocalAssignment(goal_no, goal_value)); } vector<LocalAssignment> no_effects; ValueTransitionLabel *label = new ValueTransitionLabel(0, true, goals, no_effects); LocalTransition trans(&problem->nodes[0], &problem->nodes[1], label, 0); problem->nodes[0].outgoing_transitions.push_back(trans); return problem; } int ContextEnhancedAdditiveHeuristic::get_priority( LocalProblemNode *node) const { /* Nodes have both a "cost" and a "priority", which are related. The cost is an estimate of how expensive it is to reach this node. The "priority" is the lowest cost value in the overall cost computation for which this node will be important. It is essentially the sum of the cost and a local-problem-specific "base priority", which depends on where this local problem is needed for the overall computation. 
*/ return node->owner->base_priority + node->cost; } inline void ContextEnhancedAdditiveHeuristic::initialize_heap() { node_queue.clear(); } inline void ContextEnhancedAdditiveHeuristic::add_to_heap( LocalProblemNode *node) { node_queue.push(get_priority(node), node); } bool ContextEnhancedAdditiveHeuristic::is_local_problem_set_up( const LocalProblem *problem) const { return problem->base_priority != -1; } void ContextEnhancedAdditiveHeuristic::set_up_local_problem( LocalProblem *problem, int base_priority, int start_value, const State &state) { assert(problem->base_priority == -1); problem->base_priority = base_priority; for (auto &to_node : problem->nodes) { to_node.expanded = false; to_node.cost = numeric_limits<int>::max(); to_node.waiting_list.clear(); to_node.reached_by = 0; } LocalProblemNode *start = &problem->nodes[start_value]; start->cost = 0; for (size_t i = 0; i < problem->context_variables->size(); ++i) start->context[i] = state[(*problem->context_variables)[i]].get_value(); add_to_heap(start); } void ContextEnhancedAdditiveHeuristic::try_to_fire_transition( LocalTransition *trans) { if (!trans->unreached_conditions) { LocalProblemNode *target = trans->target; if (trans->target_cost < target->cost) { target->cost = trans->target_cost; target->reached_by = trans; add_to_heap(target); } } } void ContextEnhancedAdditiveHeuristic::expand_node(LocalProblemNode *node) { node->expanded = true; // Set context unless this was an initial node. LocalTransition *reached_by = node->reached_by; if (reached_by) { LocalProblemNode *parent = reached_by->source; vector<short> &context = node->context; context = parent->context; const vector<LocalAssignment> &precond = reached_by->label->precond; for (size_t i = 0; i < precond.size(); ++i) context[precond[i].local_var] = precond[i].value; const vector<LocalAssignment> &effect = reached_by->label->effect; for (size_t i = 0; i < effect.size(); ++i) context[effect[i].local_var] = effect[i].value; if (parent->reached_by) node->reached_by = parent->reached_by; } for (size_t i = 0; i < node->waiting_list.size(); ++i) { LocalTransition *trans = node->waiting_list[i]; assert(trans->unreached_conditions); --trans->unreached_conditions; trans->target_cost += node->cost; try_to_fire_transition(trans); } node->waiting_list.clear(); } void ContextEnhancedAdditiveHeuristic::expand_transition( LocalTransition *trans, const State &state) { /* Called when the source of trans is reached by Dijkstra exploration. Try to compute cost for the target of the transition from the source cost, action cost, and set-up costs for the conditions on the label. The latter may yet be unknown, in which case we "subscribe" to the waiting list of the node that will tell us the correct value. */ assert(trans->source->cost >= 0); assert(trans->source->cost < numeric_limits<int>::max()); trans->target_cost = trans->source->cost + trans->action_cost; if (trans->target->cost <= trans->target_cost) { // Transition cannot find a shorter path to target. 
return; } trans->unreached_conditions = 0; const vector<LocalAssignment> &precond = trans->label->precond; vector<LocalAssignment>::const_iterator curr_precond = precond.begin(), last_precond = precond.end(); vector<short>::const_iterator context = trans->source->context.begin(); vector<int>::const_iterator parent_vars = trans->source->owner->context_variables->begin(); for (; curr_precond != last_precond; ++curr_precond) { int local_var = curr_precond->local_var; int current_val = context[local_var]; int precond_value = curr_precond->value; int precond_var_no = parent_vars[local_var]; if (current_val == precond_value) continue; LocalProblem *subproblem = get_local_problem( precond_var_no, current_val); if (!is_local_problem_set_up(subproblem)) { set_up_local_problem( subproblem, get_priority(trans->source), current_val, state); } LocalProblemNode *cond_node = &subproblem->nodes[precond_value]; if (cond_node->expanded) { trans->target_cost += cond_node->cost; if (trans->target->cost <= trans->target_cost) { // Transition cannot find a shorter path to target. return; } } else { cond_node->waiting_list.push_back(trans); ++trans->unreached_conditions; } } try_to_fire_transition(trans); } int ContextEnhancedAdditiveHeuristic::compute_costs(const State &state) { while (!node_queue.empty()) { pair<int, LocalProblemNode *> top_pair = node_queue.pop(); int curr_priority = top_pair.first; LocalProblemNode *node = top_pair.second; assert(is_local_problem_set_up(node->owner)); if (get_priority(node) < curr_priority) continue; if (node == goal_node) return node->cost; assert(get_priority(node) == curr_priority); expand_node(node); for (auto &transition : node->outgoing_transitions) expand_transition(&transition, state); } return DEAD_END; } void ContextEnhancedAdditiveHeuristic::mark_helpful_transitions( LocalProblem *problem, LocalProblemNode *node, const State &state) { assert(node->cost >= 0 && node->cost < numeric_limits<int>::max()); LocalTransition *first_on_path = node->reached_by; if (first_on_path) { node->reached_by = 0; // Clear to avoid revisiting this node later. if (first_on_path->target_cost == first_on_path->action_cost) { // Transition possibly applicable. const ValueTransitionLabel &label = *first_on_path->label; OperatorProxy op = label.is_axiom ? task_proxy.get_axioms()[label.op_id] : task_proxy.get_operators()[label.op_id]; if (min_action_cost != 0 || task_properties::is_applicable(op, state)) { // If there are no zero-cost actions, the target_cost/ // action_cost test above already guarantees applicability. assert(!op.is_axiom()); set_preferred(op); } } else { // Recursively compute helpful transitions for preconditions. 
int *context_vars = &*problem->context_variables->begin(); for (const auto &assignment : first_on_path->label->precond) { int precond_value = assignment.value; int local_var = assignment.local_var; int precond_var_no = context_vars[local_var]; if (state[precond_var_no].get_value() == precond_value) continue; LocalProblem *subproblem = get_local_problem( precond_var_no, state[precond_var_no].get_value()); LocalProblemNode *subnode = &subproblem->nodes[precond_value]; mark_helpful_transitions(subproblem, subnode, state); } } } } int ContextEnhancedAdditiveHeuristic::compute_heuristic( const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); initialize_heap(); goal_problem->base_priority = -1; for (LocalProblem *problem : local_problems) problem->base_priority = -1; set_up_local_problem(goal_problem, 0, 0, state); int heuristic = compute_costs(state); if (heuristic != DEAD_END && heuristic != 0) mark_helpful_transitions(goal_problem, goal_node, state); return heuristic; } ContextEnhancedAdditiveHeuristic::ContextEnhancedAdditiveHeuristic( const Options &opts) : Heuristic(opts), min_action_cost(task_properties::get_min_operator_cost(task_proxy)) { utils::g_log << "Initializing context-enhanced additive heuristic..." << endl; DTGFactory factory(task_proxy, true, [](int, int) {return false;}); transition_graphs = factory.build_dtgs(); goal_problem = build_problem_for_goal(); goal_node = &goal_problem->nodes[1]; VariablesProxy vars = task_proxy.get_variables(); local_problem_index.resize(vars.size()); for (VariableProxy var : vars) local_problem_index[var.get_id()].resize(var.get_domain_size(), 0); } ContextEnhancedAdditiveHeuristic::~ContextEnhancedAdditiveHeuristic() { if (goal_problem) { delete goal_problem->context_variables; delete goal_problem->nodes[0].outgoing_transitions[0].label; } delete goal_problem; for (LocalProblem *problem : local_problems) delete problem; } bool ContextEnhancedAdditiveHeuristic::dead_ends_are_reliable() const { return false; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("Context-enhanced additive heuristic", ""); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "supported"); parser.document_language_support( "axioms", "supported (in the sense that the planner won't complain -- " "handling of axioms might be very stupid " "and even render the heuristic unsafe)"); parser.document_property("admissible", "no"); parser.document_property("consistent", "no"); parser.document_property("safe", "no"); parser.document_property("preferred operators", "yes"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<ContextEnhancedAdditiveHeuristic>(opts); } static Plugin<Evaluator> _plugin("cea", _parse); }
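The lazy construction in get_local_problem above (one LocalProblem per (variable, start value) pair, built on first use and cached in local_problem_index) is easy to miss when skimming. Below is a minimal standalone sketch of that pattern with a stand-in Problem type and made-up domain sizes; it is illustrative only and not part of cea_heuristic.cc.

// Illustrative sketch (not part of the original file): a lazily filled
// (variable, start value) -> problem table in the style of
// ContextEnhancedAdditiveHeuristic::get_local_problem. "Problem" is a
// hypothetical stand-in for LocalProblem.
#include <iostream>
#include <memory>
#include <vector>

struct Problem {
    int var;
    int start_value;
};

class LazyProblemTable {
    // One row per variable, one slot per domain value; a null slot means
    // the corresponding problem has not been built yet.
    std::vector<std::vector<std::unique_ptr<Problem>>> index;
public:
    explicit LazyProblemTable(const std::vector<int> &domain_sizes) {
        for (int size : domain_sizes)
            index.emplace_back(size);
    }

    Problem *get(int var, int value) {
        std::unique_ptr<Problem> &slot = index[var][value];
        if (!slot) {
            slot.reset(new Problem{var, value});
            std::cout << "built problem for var " << var
                      << ", start value " << value << std::endl;
        }
        return slot.get();
    }
};

int main() {
    LazyProblemTable table({3, 2});   // two hypothetical variables
    table.get(0, 2);                  // builds the problem and prints once
    table.get(0, 2);                  // second call reuses the cached problem
    return 0;
}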
17,595
C++
36.438298
87
0.651719
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/cg_cache.cc
#include "cg_cache.h" #include "../task_proxy.h" #include "../task_utils/causal_graph.h" #include "../utils/collections.h" #include "../utils/logging.h" #include "../utils/math.h" #include <algorithm> #include <cassert> #include <iostream> #include <vector> using namespace std; namespace cg_heuristic { const int CGCache::NOT_COMPUTED; CGCache::CGCache(const TaskProxy &task_proxy, int max_cache_size) : task_proxy(task_proxy) { utils::g_log << "Initializing heuristic cache... " << flush; int var_count = task_proxy.get_variables().size(); const causal_graph::CausalGraph &cg = task_proxy.get_causal_graph(); // Compute inverted causal graph. depends_on.resize(var_count); for (int var = 0; var < var_count; ++var) { for (auto succ_var : cg.get_pre_to_eff(var)) { // Ignore arcs that are not part of the reduced CG: // These are ignored by the CG heuristic. if (succ_var > var) depends_on[succ_var].push_back(var); } } // Compute transitive closure of inverted reduced causal graph. // This is made easier because it is acyclic and the variables // are in topological order. for (int var = 0; var < var_count; ++var) { size_t num_affectors = depends_on[var].size(); for (size_t i = 0; i < num_affectors; ++i) { int affector = depends_on[var][i]; assert(affector < var); depends_on[var].insert(depends_on[var].end(), depends_on[affector].begin(), depends_on[affector].end()); } sort(depends_on[var].begin(), depends_on[var].end()); depends_on[var].erase(unique(depends_on[var].begin(), depends_on[var].end()), depends_on[var].end()); } cache.resize(var_count); helpful_transition_cache.resize(var_count); for (int var = 0; var < var_count; ++var) { int required_cache_size = compute_required_cache_size( var, depends_on[var], max_cache_size); if (required_cache_size != -1) { cache[var].resize(required_cache_size, NOT_COMPUTED); helpful_transition_cache[var].resize(required_cache_size, nullptr); } } utils::g_log << "done!" << endl; } CGCache::~CGCache() { } int CGCache::compute_required_cache_size( int var_id, const vector<int> &depends_on, int max_cache_size) const { /* Compute the size of the cache required for variable with ID "var_id", which depends on the variables in "depends_on". Requires that the caches for all variables in "depends_on" have already been allocated. Returns -1 if the variable cannot be cached because the required cache size would be too large. */ VariablesProxy variables = task_proxy.get_variables(); int var_domain = variables[var_id].get_domain_size(); if (!utils::is_product_within_limit(var_domain, var_domain - 1, max_cache_size)) return -1; int required_size = var_domain * (var_domain - 1); for (int depend_var_id : depends_on) { int depend_var_domain = variables[depend_var_id].get_domain_size(); /* If var depends on a variable var_i that is not cached, then it cannot be cached. This is possible even if var would have an acceptable cache size because the domain of var_i contributes quadratically to its own cache size but only linearly to the cache size of var. 
*/ if (cache[depend_var_id].empty()) return -1; if (!utils::is_product_within_limit(required_size, depend_var_domain, max_cache_size)) return -1; required_size *= depend_var_domain; } return required_size; } int CGCache::get_index(int var, const State &state, int from_val, int to_val) const { assert(is_cached(var)); assert(from_val != to_val); int index = from_val; int multiplier = task_proxy.get_variables()[var].get_domain_size(); for (int dep_var : depends_on[var]) { index += state[dep_var].get_value() * multiplier; multiplier *= task_proxy.get_variables()[dep_var].get_domain_size(); } if (to_val > from_val) --to_val; index += to_val * multiplier; assert(utils::in_bounds(index, cache[var])); return index; } }
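To make the layout computed by compute_required_cache_size and addressed by get_index above concrete, here is a small self-contained re-derivation with plain integers. The domain sizes and state values are made up for illustration; the indexing logic mirrors the member function but is not the class itself.

// Standalone illustration of the cache layout used by CGCache::get_index:
// the index is a mixed-radix number with digits
//   (from_val, dep_1 value, ..., dep_k value, adjusted to_val),
// where to_val is shifted down to skip the impossible to_val == from_val slot.
#include <cassert>
#include <iostream>
#include <vector>

int cache_index(int from_val, int to_val, int var_domain,
                const std::vector<int> &dep_values,
                const std::vector<int> &dep_domains) {
    assert(from_val != to_val);
    int index = from_val;
    int multiplier = var_domain;
    for (size_t i = 0; i < dep_values.size(); ++i) {
        index += dep_values[i] * multiplier;
        multiplier *= dep_domains[i];
    }
    if (to_val > from_val)
        --to_val;   // skip the entry where to_val would equal from_val
    return index + to_val * multiplier;
}

int main() {
    // Hypothetical variable with domain size 3 that depends on one variable
    // with domain size 2: the cache needs 3 * 2 * 2 = 12 entries
    // (var_domain * (var_domain - 1) * dependent domain size).
    int var_domain = 3;
    std::vector<int> dep_domains = {2};
    std::vector<int> dep_values = {1};   // current value of the dependency
    std::cout << cache_index(0, 2, var_domain, dep_values, dep_domains)
              << std::endl;              // prints 9 = 0 + 1*3 + 1*6
    return 0;
}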
4,501
C++
33.366412
85
0.592979
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/array_pool.h
#ifndef HEURISTICS_ARRAY_POOL_H #define HEURISTICS_ARRAY_POOL_H #include <cassert> #include <vector> /* ArrayPool is intended as a compact representation of a large collection of arrays that are allocated individually but deallocated together. Each array may have a different size, but ArrayPool does not keep track of the array sizes; its user must maintain this information themselves. See the relaxation heuristics for usage examples. If the class turns out to be more generally useful, it could be templatized (currently, ValueType = int is hardcoded) and moved to the algorithms directory. */ namespace array_pool { const int INVALID_INDEX = -1; using Value = int; class ArrayPoolIndex { friend class ArrayPool; int position; ArrayPoolIndex(int position) : position(position) { } public: ArrayPoolIndex() : position(INVALID_INDEX) { } }; class ArrayPoolSlice { public: using Iterator = std::vector<Value>::const_iterator; Iterator begin() { return first; } Iterator end() { return last; } private: friend class ArrayPool; Iterator first; Iterator last; ArrayPoolSlice(Iterator first, Iterator last) : first(first), last(last) { } }; class ArrayPool { std::vector<Value> data; public: ArrayPoolIndex append(const std::vector<Value> &vec) { ArrayPoolIndex index(data.size()); data.insert(data.end(), vec.begin(), vec.end()); return index; } ArrayPoolSlice get_slice(ArrayPoolIndex index, int size) const { assert(index.position >= 0 && size >= 0 && index.position + size <= static_cast<int>(data.size())); return ArrayPoolSlice(data.begin() + index.position, data.begin() + index.position + size); } }; } #endif
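A minimal usage sketch for the interface above, exercising only the members declared in this header (append and get_slice). The sample vectors are made up and the sketch assumes it is compiled with array_pool.h on the include path; as the header comment notes, the caller must remember each array's size.

// Illustrative sketch (not part of the original header): basic use of
// array_pool::ArrayPool as declared above.
#include <iostream>
#include <vector>

#include "array_pool.h"

int main() {
    array_pool::ArrayPool pool;
    std::vector<int> first = {3, 1, 4};
    std::vector<int> second = {1, 5};

    // The pool does not track sizes, so the caller keeps them.
    pool.append(first);                 // size 3 remembered by the caller
    array_pool::ArrayPoolIndex second_index = pool.append(second);
    int second_size = second.size();

    // Read the second array back through its index and remembered size.
    array_pool::ArrayPoolSlice slice = pool.get_slice(second_index, second_size);
    for (int value : slice)
        std::cout << value << " ";      // prints: 1 5
    std::cout << std::endl;
    return 0;
}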
1,848
C
23.012987
99
0.661255
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/domain_transition_graph.cc
#include "domain_transition_graph.h" #include "../utils/hash.h" #include "../utils/memory.h" #include <algorithm> #include <cassert> #include <iostream> using namespace std; namespace domain_transition_graph { DTGFactory::DTGFactory(const TaskProxy &task_proxy, bool collect_transition_side_effects, const function<bool(int, int)> &pruning_condition) : task_proxy(task_proxy), collect_transition_side_effects(collect_transition_side_effects), pruning_condition(pruning_condition) { } DTGFactory::DTGs DTGFactory::build_dtgs() { DTGs dtgs; allocate_graphs_and_nodes(dtgs); initialize_index_structures(dtgs.size()); create_transitions(dtgs); simplify_transitions(dtgs); if (collect_transition_side_effects) collect_all_side_effects(dtgs); return dtgs; } void DTGFactory::allocate_graphs_and_nodes(DTGs &dtgs) { VariablesProxy variables = task_proxy.get_variables(); dtgs.resize(variables.size()); for (VariableProxy var : variables) { int var_id = var.get_id(); int range = var.get_domain_size(); dtgs[var_id] = utils::make_unique_ptr<DomainTransitionGraph>(var_id, range); } } void DTGFactory::initialize_index_structures(int num_dtgs) { transition_index.clear(); transition_index.resize(num_dtgs); global_to_local_var.clear(); global_to_local_var.resize(num_dtgs); } void DTGFactory::create_transitions(DTGs &dtgs) { for (OperatorProxy op : task_proxy.get_operators()) for (EffectProxy eff : op.get_effects()) process_effect(eff, op, dtgs); for (OperatorProxy ax : task_proxy.get_axioms()) for (EffectProxy eff : ax.get_effects()) process_effect(eff, ax, dtgs); } void DTGFactory::process_effect(const EffectProxy &eff, const OperatorProxy &op, DTGs &dtgs) { FactProxy fact = eff.get_fact(); int var_id = fact.get_variable().get_id(); DomainTransitionGraph *dtg = dtgs[var_id].get(); int origin = -1; int target = fact.get_value(); vector<LocalAssignment> transition_condition; vector<LocalAssignment> side_effect; unsigned int first_new_local_var = dtg->local_to_global_child.size(); for (FactProxy pre : op.get_preconditions()) { if (pre.get_variable() == fact.get_variable()) origin = pre.get_value(); else update_transition_condition(pre, dtg, transition_condition); } for (FactProxy cond : eff.get_conditions()) { if (cond.get_variable() == fact.get_variable()) { if (origin != -1 && cond.get_value() != origin) { revert_new_local_vars(dtg, first_new_local_var); return; // conflicting condition on effect variable } origin = cond.get_value(); } else { update_transition_condition(cond, dtg, transition_condition); } } if (target == origin) { revert_new_local_vars(dtg, first_new_local_var); return; } if (origin != -1) { ValueTransition *trans = get_transition(origin, target, dtg); trans->labels.push_back( ValueTransitionLabel(op.get_id(), op.is_axiom(), transition_condition, side_effect)); } else { int domain_size = fact.get_variable().get_domain_size(); for (int origin = 0; origin < domain_size; ++origin) { if (origin == target) continue; ValueTransition *trans = get_transition(origin, target, dtg); trans->labels.push_back( ValueTransitionLabel(op.get_id(), op.is_axiom(), transition_condition, side_effect)); } } } void DTGFactory::update_transition_condition(const FactProxy &fact, DomainTransitionGraph *dtg, vector<LocalAssignment> &condition) { int fact_var = fact.get_variable().get_id(); if (!pruning_condition(dtg->var, fact_var)) { extend_global_to_local_mapping_if_necessary(dtg, fact_var); int local_var = global_to_local_var[dtg->var][fact_var]; condition.push_back(LocalAssignment(local_var, fact.get_value())); } } void 
DTGFactory::extend_global_to_local_mapping_if_necessary( DomainTransitionGraph *dtg, int global_var) { if (!global_to_local_var[dtg->var].count(global_var)) { global_to_local_var[dtg->var][global_var] = dtg->local_to_global_child.size(); dtg->local_to_global_child.push_back(global_var); } } void DTGFactory::revert_new_local_vars(DomainTransitionGraph *dtg, unsigned int first_local_var) { vector<int> &loc_to_glob = dtg->local_to_global_child; for (unsigned int l = first_local_var; l < loc_to_glob.size(); ++l) global_to_local_var[dtg->var].erase(loc_to_glob[l]); if (loc_to_glob.size() > first_local_var) loc_to_glob.erase(loc_to_glob.begin() + first_local_var, loc_to_glob.end()); } ValueTransition *DTGFactory::get_transition(int origin, int target, DomainTransitionGraph *dtg) { utils::HashMap<pair<int, int>, int> &trans_map = transition_index[dtg->var]; pair<int, int> arc = make_pair(origin, target); ValueNode &origin_node = dtg->nodes[origin]; // create new transition if necessary if (!trans_map.count(arc)) { trans_map[arc] = origin_node.transitions.size(); ValueNode &target_node = dtg->nodes[target]; origin_node.transitions.push_back(ValueTransition(&target_node)); } return &origin_node.transitions[trans_map[arc]]; } void DTGFactory::collect_all_side_effects(DTGs &dtgs) { for (auto &dtg : dtgs) { for (auto &node : dtg->nodes) for (auto &transition: node.transitions) collect_side_effects(dtg.get(), transition.labels); } } void DTGFactory::collect_side_effects(DomainTransitionGraph *dtg, vector<ValueTransitionLabel> &labels) { const vector<int> &loc_to_glob = dtg->local_to_global_child; const unordered_map<int, int> &glob_to_loc = global_to_local_var[dtg->var]; for (auto &label : labels) { // create global condition for label vector<FactPair> precond_pairs; for (auto &assignment : label.precond) { int var = loc_to_glob[assignment.local_var]; precond_pairs.emplace_back(var, assignment.value); } sort(precond_pairs.begin(), precond_pairs.end()); // collect operator precondition OperatorProxy op = get_op_for_label(label); unordered_map<int, int> pre_map; for (FactProxy pre : op.get_preconditions()) pre_map[pre.get_variable().get_id()] = pre.get_value(); // collect side effect from each operator effect vector<LocalAssignment> side_effects; for (EffectProxy eff : op.get_effects()) { int var_no = eff.get_fact().get_variable().get_id(); if (var_no == dtg->var || !glob_to_loc.count(var_no)) { // This is either an effect on the variable we're // building the DTG for, or an effect on a variable we // don't need to track because it doesn't appear in // conditions of this DTG. Ignore it. 
continue; } int pre = -1; auto pre_it = pre_map.find(var_no); if (pre_it != pre_map.end()) pre = pre_it->second; int post = eff.get_fact().get_value(); vector<FactPair> triggercond_pairs; if (pre != -1) triggercond_pairs.emplace_back(var_no, pre); for (FactProxy condition : eff.get_conditions()) { int c_var_id = condition.get_variable().get_id(); int c_val = condition.get_value(); triggercond_pairs.emplace_back(c_var_id, c_val); } sort(triggercond_pairs.begin(), triggercond_pairs.end()); if (includes(precond_pairs.begin(), precond_pairs.end(), triggercond_pairs.begin(), triggercond_pairs.end())) { int local_var = glob_to_loc.at(var_no); side_effects.push_back(LocalAssignment(local_var, post)); } } label.effect = side_effects; } } void DTGFactory::simplify_transitions(DTGs &dtgs) { for (auto &dtg : dtgs) for (ValueNode & node : dtg->nodes) for (ValueTransition & transition : node.transitions) simplify_labels(transition.labels); } OperatorProxy DTGFactory::get_op_for_label(const ValueTransitionLabel &label) { if (label.is_axiom) return task_proxy.get_axioms()[label.op_id]; return task_proxy.get_operators()[label.op_id]; } void DTGFactory::simplify_labels(vector<ValueTransitionLabel> &labels) { // Remove labels with duplicate or dominated conditions. /* Algorithm: Put all transitions into an unordered_map (key: condition, value: index in label list). This already gets rid of transitions with identical conditions. Then go through the unordered_map, checking for each element if none of the subset conditions are part of the unordered_map. Put the element into the new labels list iff this is the case. */ using HashKey = vector<FactPair>; using HashMap = utils::HashMap<HashKey, int>; HashMap label_index; label_index.reserve(labels.size()); for (size_t i = 0; i < labels.size(); ++i) { HashKey key; for (LocalAssignment &assign : labels[i].precond) key.emplace_back(assign.local_var, assign.value); sort(key.begin(), key.end()); label_index[key] = i; } vector<ValueTransitionLabel> old_labels; old_labels.swap(labels); for (auto &entry : label_index) { const HashKey &key = entry.first; int label_no = entry.second; int powerset_size = (1 << key.size()) - 1; // -1: only consider proper subsets bool match = false; if (powerset_size <= 31) { // HACK! Don't spend too much time here... OperatorProxy op = get_op_for_label(old_labels[label_no]); for (int mask = 0; mask < powerset_size; ++mask) { HashKey subset; for (size_t i = 0; i < key.size(); ++i) if (mask & (1 << i)) subset.push_back(key[i]); HashMap::iterator found = label_index.find(subset); if (found != label_index.end()) { const ValueTransitionLabel &f_label = old_labels[found->second]; OperatorProxy f_op = get_op_for_label(f_label); if (op.get_cost() >= f_op.get_cost()) { /* TODO: Depending on how clever we want to be, we could prune based on the *adjusted* cost for the respective heuristic instead. This would potentially allow us more pruning when using unit costs as adjusted costs. Seems a minor optimization though. */ match = true; break; } } } } if (!match) labels.push_back(old_labels[label_no]); } } DomainTransitionGraph::DomainTransitionGraph(int var_index, int node_count) { var = var_index; nodes.reserve(node_count); for (int value = 0; value < node_count; ++value) nodes.push_back(ValueNode(this, value)); last_helpful_transition_extraction_time = -1; } }
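The subset enumeration inside simplify_labels above is compact but easy to misread: powerset_size excludes the full set, so only proper subsets of a condition (including the empty one) are checked for a dominating label. The standalone sketch below reproduces just that enumeration for a made-up three-fact condition; it is not part of the factory code.

// Enumerate all proper subsets of a three-element condition {A, B, C} the
// same way simplify_labels does: powerset_size = (1 << 3) - 1 = 7, and the
// masks 0..6 never select the full set.
#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> key = {"A", "B", "C"};
    int powerset_size = (1 << key.size()) - 1;   // proper subsets only
    for (int mask = 0; mask < powerset_size; ++mask) {
        std::cout << "{";
        for (size_t i = 0; i < key.size(); ++i)
            if (mask & (1 << i))
                std::cout << " " << key[i];
        std::cout << " }" << std::endl;
    }
    // In simplify_labels, a label is dropped if any of these subsets indexes
    // another label whose operator is at most as expensive.
    return 0;
}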
11,870
C++
38.307947
101
0.586689
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/hm_heuristic.cc
#include "hm_heuristic.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include <cassert> #include <limits> #include <set> using namespace std; namespace hm_heuristic { HMHeuristic::HMHeuristic(const Options &opts) : Heuristic(opts), m(opts.get<int>("m")), has_cond_effects(task_properties::has_conditional_effects(task_proxy)), goals(task_properties::get_fact_pairs(task_proxy.get_goals())) { utils::g_log << "Using h^" << m << "." << endl; utils::g_log << "The implementation of the h^m heuristic is preliminary." << endl << "It is SLOOOOOOOOOOOW." << endl << "Please do not use this for comparison!" << endl; generate_all_tuples(); } bool HMHeuristic::dead_ends_are_reliable() const { return !task_properties::has_axioms(task_proxy) && !has_cond_effects; } int HMHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); if (task_properties::is_goal_state(task_proxy, state)) { return 0; } else { Tuple s_tup = task_properties::get_fact_pairs(state); init_hm_table(s_tup); update_hm_table(); int h = eval(goals); if (h == numeric_limits<int>::max()) return DEAD_END; return h; } } void HMHeuristic::init_hm_table(const Tuple &t) { for (auto &hm_ent : hm_table) { const Tuple &tuple = hm_ent.first; int h_val = check_tuple_in_tuple(tuple, t); hm_table[tuple] = h_val; } } void HMHeuristic::update_hm_table() { int round = 0; do { ++round; was_updated = false; for (OperatorProxy op : task_proxy.get_operators()) { Tuple pre = get_operator_pre(op); int c1 = eval(pre); if (c1 != numeric_limits<int>::max()) { Tuple eff = get_operator_eff(op); vector<Tuple> partial_effs; generate_all_partial_tuples(eff, partial_effs); for (Tuple &partial_eff : partial_effs) { update_hm_entry(partial_eff, c1 + op.get_cost()); int eff_size = partial_eff.size(); if (eff_size < m) { extend_tuple(partial_eff, op); } } } } } while (was_updated); } void HMHeuristic::extend_tuple(const Tuple &t, const OperatorProxy &op) { for (auto &hm_ent : hm_table) { const Tuple &tuple = hm_ent.first; bool contradict = false; for (const FactPair &fact : tuple) { if (contradict_effect_of(op, fact.var, fact.value)) { contradict = true; break; } } if (!contradict && (tuple.size() > t.size()) && (check_tuple_in_tuple(t, tuple) == 0)) { Tuple pre = get_operator_pre(op); Tuple others; for (const FactPair &fact : tuple) { if (find(t.begin(), t.end(), fact) == t.end()) { others.push_back(fact); if (find(pre.begin(), pre.end(), fact) == pre.end()) { pre.push_back(fact); } } } sort(pre.begin(), pre.end()); set<int> vars; bool is_valid = true; for (const FactPair &fact : pre) { if (vars.count(fact.var) != 0) { is_valid = false; break; } vars.insert(fact.var); } if (is_valid) { int c2 = eval(pre); if (c2 != numeric_limits<int>::max()) { update_hm_entry(tuple, c2 + op.get_cost()); } } } } } int HMHeuristic::eval(const Tuple &t) const { vector<Tuple> partial; generate_all_partial_tuples(t, partial); int max = 0; for (Tuple &tuple : partial) { assert(hm_table.count(tuple) == 1); int h = hm_table.at(tuple); if (h > max) { max = h; } } return max; } int HMHeuristic::update_hm_entry(const Tuple &t, int val) { assert(hm_table.count(t) == 1); if (hm_table[t] > val) { hm_table[t] = val; was_updated = true; } return val; } int HMHeuristic::check_tuple_in_tuple( const Tuple &tuple, const Tuple &big_tuple) const { for (const FactPair &fact0 : tuple) { bool found = false; for (auto &fact1 : big_tuple) { if (fact0 == fact1) { found = true; 
break; } } if (!found) { return numeric_limits<int>::max(); } } return 0; } HMHeuristic::Tuple HMHeuristic::get_operator_pre(const OperatorProxy &op) const { Tuple preconditions = task_properties::get_fact_pairs(op.get_preconditions()); sort(preconditions.begin(), preconditions.end()); return preconditions; } HMHeuristic::Tuple HMHeuristic::get_operator_eff(const OperatorProxy &op) const { Tuple effects; for (EffectProxy eff : op.get_effects()) { effects.push_back(eff.get_fact().get_pair()); } sort(effects.begin(), effects.end()); return effects; } bool HMHeuristic::contradict_effect_of( const OperatorProxy &op, int var, int val) const { for (EffectProxy eff : op.get_effects()) { FactProxy fact = eff.get_fact(); if (fact.get_variable().get_id() == var && fact.get_value() != val) { return true; } } return false; } void HMHeuristic::generate_all_tuples() { Tuple t; generate_all_tuples_aux(0, m, t); } void HMHeuristic::generate_all_tuples_aux(int var, int sz, const Tuple &base) { int num_variables = task_proxy.get_variables().size(); for (int i = var; i < num_variables; ++i) { int domain_size = task_proxy.get_variables()[i].get_domain_size(); for (int j = 0; j < domain_size; ++j) { Tuple tuple(base); tuple.emplace_back(i, j); hm_table[tuple] = 0; if (sz > 1) { generate_all_tuples_aux(i + 1, sz - 1, tuple); } } } } void HMHeuristic::generate_all_partial_tuples( const Tuple &base_tuple, vector<Tuple> &res) const { Tuple t; generate_all_partial_tuples_aux(base_tuple, t, 0, m, res); } void HMHeuristic::generate_all_partial_tuples_aux( const Tuple &base_tuple, const Tuple &t, int index, int sz, vector<Tuple> &res) const { if (sz == 1) { for (size_t i = index; i < base_tuple.size(); ++i) { Tuple tuple(t); tuple.push_back(base_tuple[i]); res.push_back(tuple); } } else { for (size_t i = index; i < base_tuple.size(); ++i) { Tuple tuple(t); tuple.push_back(base_tuple[i]); res.push_back(tuple); generate_all_partial_tuples_aux(base_tuple, tuple, i + 1, sz - 1, res); } } } void HMHeuristic::dump_table() const { for (auto &hm_ent : hm_table) { utils::g_log << "h(" << hm_ent.first << ") = " << hm_ent.second << endl; } } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("h^m heuristic", ""); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "ignored"); parser.document_language_support("axioms", "ignored"); parser.document_property("admissible", "yes for tasks without conditional " "effects or axioms"); parser.document_property("consistent", "yes for tasks without conditional " "effects or axioms"); parser.document_property("safe", "yes for tasks without conditional " "effects or axioms"); parser.document_property("preferred operators", "no"); parser.add_option<int>("m", "subset size", "2", Bounds("1", "infinity")); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<HMHeuristic>(opts); } static Plugin<Evaluator> _plugin("hm", _parse); }
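For intuition about how generate_all_tuples above enumerates the keys of the h^m table, this standalone sketch reproduces the recursion for a hypothetical task with two variables of domain size 2 and m = 2; the variable names and counts are illustrative only and the sketch is not part of hm_heuristic.cc.

// Reproduce the enumeration of HMHeuristic::generate_all_tuples_aux for a
// toy task: variables v0, v1 with domain size 2 each, m = 2.
// Expected keys: 4 singletons plus 4 pairs over distinct variables = 8 tuples.
#include <iostream>
#include <utility>
#include <vector>

using Tuple = std::vector<std::pair<int, int>>;   // (variable, value) pairs

void generate(int var, int sz, const Tuple &base,
              const std::vector<int> &domains, std::vector<Tuple> &table) {
    for (int i = var; i < static_cast<int>(domains.size()); ++i) {
        for (int j = 0; j < domains[i]; ++j) {
            Tuple tuple(base);
            tuple.emplace_back(i, j);
            table.push_back(tuple);
            if (sz > 1)
                generate(i + 1, sz - 1, tuple, domains, table);
        }
    }
}

int main() {
    std::vector<int> domains = {2, 2};
    std::vector<Tuple> table;
    generate(0, /*m=*/2, {}, domains, table);
    std::cout << table.size() << " tuples" << std::endl;   // prints "8 tuples"
    return 0;
}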
8,477
C++
28.134021
96
0.533797
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/cg_heuristic.cc
#include "cg_heuristic.h" #include "cg_cache.h" #include "domain_transition_graph.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/task_properties.h" #include "../utils/logging.h" #include <algorithm> #include <cassert> #include <limits> #include <vector> using namespace std; using namespace domain_transition_graph; namespace cg_heuristic { CGHeuristic::CGHeuristic(const Options &opts) : Heuristic(opts), cache_hits(0), cache_misses(0), helpful_transition_extraction_counter(0), min_action_cost(task_properties::get_min_operator_cost(task_proxy)) { utils::g_log << "Initializing causal graph heuristic..." << endl; int max_cache_size = opts.get<int>("max_cache_size"); if (max_cache_size > 0) cache = utils::make_unique_ptr<CGCache>(task_proxy, max_cache_size); unsigned int num_vars = task_proxy.get_variables().size(); prio_queues.reserve(num_vars); for (size_t i = 0; i < num_vars; ++i) prio_queues.push_back(utils::make_unique_ptr<ValueNodeQueue>()); function<bool(int, int)> pruning_condition = [](int dtg_var, int cond_var) {return dtg_var <= cond_var;}; DTGFactory factory(task_proxy, false, pruning_condition); transition_graphs = factory.build_dtgs(); } CGHeuristic::~CGHeuristic() { } bool CGHeuristic::dead_ends_are_reliable() const { return false; } int CGHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); setup_domain_transition_graphs(); int heuristic = 0; for (FactProxy goal : task_proxy.get_goals()) { const VariableProxy var = goal.get_variable(); int var_no = var.get_id(); int from = state[var_no].get_value(), to = goal.get_value(); DomainTransitionGraph *dtg = transition_graphs[var_no].get(); int cost_for_goal = get_transition_cost(state, dtg, from, to); if (cost_for_goal == numeric_limits<int>::max()) { return DEAD_END; } else { heuristic += cost_for_goal; mark_helpful_transitions(state, dtg, to); } } return heuristic; } void CGHeuristic::setup_domain_transition_graphs() { for (auto &dtg : transition_graphs) { for (auto &node : dtg->nodes) { node.distances.clear(); node.helpful_transitions.clear(); } } // Reset "dirty bits" for helpful transitions. ++helpful_transition_extraction_counter; } int CGHeuristic::get_transition_cost(const State &state, DomainTransitionGraph *dtg, int start_val, int goal_val) { if (start_val == goal_val) return 0; int var_no = dtg->var; // Check cache. bool use_the_cache = cache && cache->is_cached(var_no); if (use_the_cache) { int cached_val = cache->lookup(var_no, state, start_val, goal_val); if (cached_val != CGCache::NOT_COMPUTED) { ++cache_hits; return cached_val; } } else { ++cache_misses; } ValueNode *start = &dtg->nodes[start_val]; if (start->distances.empty()) { // Initialize data of initial node. start->distances.resize(dtg->nodes.size(), numeric_limits<int>::max()); start->helpful_transitions.resize(dtg->nodes.size(), 0); start->distances[start_val] = 0; start->reached_from = 0; start->reached_by = 0; start->children_state.resize(dtg->local_to_global_child.size()); for (size_t i = 0; i < dtg->local_to_global_child.size(); ++i) { start->children_state[i] = state[dtg->local_to_global_child[i]].get_value(); } // Initialize Heap for Dijkstra's algorithm. priority_queues::AdaptiveQueue<ValueNode *> &prio_queue = *prio_queues[var_no]; prio_queue.clear(); prio_queue.push(0, start); // Dijkstra algorithm main loop. 
while (!prio_queue.empty()) { pair<int, ValueNode *> top_pair = prio_queue.pop(); int source_distance = top_pair.first; ValueNode *source = top_pair.second; assert(start->distances[source->value] <= source_distance); if (start->distances[source->value] < source_distance) continue; ValueTransitionLabel *current_helpful_transition = start->helpful_transitions[source->value]; // Set children state for all nodes but the initial. if (source->value != start_val) { source->children_state = source->reached_from->children_state; vector<LocalAssignment> &precond = source->reached_by->precond; for (const LocalAssignment &assign : precond) source->children_state[assign.local_var] = assign.value; } // Scan outgoing transitions. for (ValueTransition &transition : source->transitions) { ValueNode *target = transition.target; int *target_distance_ptr = &start->distances[target->value]; // Scan labels of the transition. for (ValueTransitionLabel &label : transition.labels) { OperatorProxy op = label.is_axiom ? task_proxy.get_axioms()[label.op_id] : task_proxy.get_operators()[label.op_id]; int new_distance = source_distance + op.get_cost(); for (LocalAssignment &assignment : label.precond) { if (new_distance >= *target_distance_ptr) break; // We already know this isn't an improved path. int local_var = assignment.local_var; int current_val = source->children_state[local_var]; int global_var = dtg->local_to_global_child[local_var]; DomainTransitionGraph *precond_dtg = transition_graphs[global_var].get(); int recursive_cost = get_transition_cost( state, precond_dtg, current_val, assignment.value); if (recursive_cost == numeric_limits<int>::max()) new_distance = numeric_limits<int>::max(); else new_distance += recursive_cost; } if (new_distance < min_action_cost) { /* If the cost is lower than the min action cost, we know we're too optimistic, so we might as well increase it. This helps quite a bit in PSR-Large, apparently, which is why this is in, but this should probably an option. TODO: Evaluate impact of this. */ new_distance = min_action_cost; } if (*target_distance_ptr > new_distance) { // Update node in heap and update its internal state. *target_distance_ptr = new_distance; target->reached_from = source; target->reached_by = &label; if (current_helpful_transition == 0) { // This transition starts at the start node; // no helpful transitions recorded yet. start->helpful_transitions[target->value] = &label; } else { start->helpful_transitions[target->value] = current_helpful_transition; } prio_queue.push(new_distance, target); } } } } } if (use_the_cache) { int num_values = start->distances.size(); for (int val = 0; val < num_values; ++val) { if (val == start_val) continue; int distance = start->distances[val]; ValueTransitionLabel *helpful = start->helpful_transitions[val]; // We should have a helpful transition iff distance is infinite. assert((distance == numeric_limits<int>::max()) == !helpful); cache->store(var_no, state, start_val, val, distance); cache->store_helpful_transition( var_no, state, start_val, val, helpful); } } return start->distances[goal_val]; } void CGHeuristic::mark_helpful_transitions(const State &state, DomainTransitionGraph *dtg, int to) { int var_no = dtg->var; int from = state[var_no].get_value(); if (from == to) return; /* Avoid checking helpful transitions for the same variable twice via different paths of recursion. Interestingly, this technique even blocks further calls with the same variable *if the to value is different*. 
This looks wrong, but in first, very preliminary tests, this appeared better in terms of evaluations than not blocking such calls. Maybe it's better to pick only a few preferred operators since this focuses search more? TODO: Test this more systematically. An easy way to test this is by simply removing the following test-and-return. Of course, this also has a performance impact, so the correct way to test this is by looking at evaluations/expansions only. If it turns out that this is an interesting choice, we should look into this more deeply and maybe turn this into an option. */ if (dtg->last_helpful_transition_extraction_time == helpful_transition_extraction_counter) return; dtg->last_helpful_transition_extraction_time = helpful_transition_extraction_counter; ValueTransitionLabel *helpful; int cost; // Check cache. if (cache && cache->is_cached(var_no)) { helpful = cache->lookup_helpful_transition(var_no, state, from, to); cost = cache->lookup(var_no, state, from, to); assert(helpful); } else { ValueNode *start_node = &dtg->nodes[from]; assert(!start_node->helpful_transitions.empty()); helpful = start_node->helpful_transitions[to]; cost = start_node->distances[to]; } OperatorProxy op = helpful->is_axiom ? task_proxy.get_axioms()[helpful->op_id] : task_proxy.get_operators()[helpful->op_id]; if (cost == op.get_cost() && !op.is_axiom() && task_properties::is_applicable(op, state)) { // Transition immediately applicable, all preconditions true. set_preferred(op); } else { // Recursively compute helpful transitions for the precondition variables. for (const LocalAssignment &assignment : helpful->precond) { int local_var = assignment.local_var; int global_var = dtg->local_to_global_child[local_var]; DomainTransitionGraph *precond_dtg = transition_graphs[global_var].get(); mark_helpful_transitions(state, precond_dtg, assignment.value); } } } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("Causal graph heuristic", ""); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "supported"); parser.document_language_support( "axioms", "supported (in the sense that the planner won't complain -- " "handling of axioms might be very stupid " "and even render the heuristic unsafe)"); parser.document_property("admissible", "no"); parser.document_property("consistent", "no"); parser.document_property("safe", "no"); parser.document_property("preferred operators", "yes"); parser.add_option<int>( "max_cache_size", "maximum number of cached entries per variable (set to 0 to disable cache)", "1000000", Bounds("0", "infinity")); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<CGHeuristic>(opts); } static Plugin<Evaluator> _plugin("cg", _parse); }
12,563
C++
38.38558
99
0.573112
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/max_heuristic.cc
#include "max_heuristic.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/logging.h" #include <cassert> #include <vector> using namespace std; namespace max_heuristic { /* TODO: At the time of this writing, this shares huge amounts of code with h^add, and the two should be refactored so that the common code is only included once, in so far as this is possible without sacrificing run-time. We may want to avoid virtual calls in the inner-most loops; maybe a templated strategy pattern is an option. Right now, the only differences to the h^add code are the use of max() instead of add() and the lack of preferred operator support (but we might actually reintroduce that if it doesn't hurt performance too much). */ // construction and destruction HSPMaxHeuristic::HSPMaxHeuristic(const Options &opts) : RelaxationHeuristic(opts) { utils::g_log << "Initializing HSP max heuristic..." << endl; } // heuristic computation void HSPMaxHeuristic::setup_exploration_queue() { queue.clear(); for (Proposition &prop : propositions) prop.cost = -1; // Deal with operators and axioms without preconditions. for (UnaryOperator &op : unary_operators) { op.unsatisfied_preconditions = op.num_preconditions; op.cost = op.base_cost; // will be increased by precondition costs if (op.unsatisfied_preconditions == 0) enqueue_if_necessary(op.effect, op.base_cost); } } void HSPMaxHeuristic::setup_exploration_queue_state(const State &state) { for (FactProxy fact : state) { PropID init_prop = get_prop_id(fact); enqueue_if_necessary(init_prop, 0); } } void HSPMaxHeuristic::relaxed_exploration() { int unsolved_goals = goal_propositions.size(); while (!queue.empty()) { pair<int, PropID> top_pair = queue.pop(); int distance = top_pair.first; PropID prop_id = top_pair.second; Proposition *prop = get_proposition(prop_id); int prop_cost = prop->cost; assert(prop_cost >= 0); assert(prop_cost <= distance); if (prop_cost < distance) continue; if (prop->is_goal && --unsolved_goals == 0) return; for (OpID op_id : precondition_of_pool.get_slice( prop->precondition_of, prop->num_precondition_occurences)) { UnaryOperator *unary_op = get_operator(op_id); unary_op->cost = max(unary_op->cost, unary_op->base_cost + prop_cost); --unary_op->unsatisfied_preconditions; assert(unary_op->unsatisfied_preconditions >= 0); if (unary_op->unsatisfied_preconditions == 0) enqueue_if_necessary(unary_op->effect, unary_op->cost); } } } int HSPMaxHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); setup_exploration_queue(); setup_exploration_queue_state(state); relaxed_exploration(); int total_cost = 0; for (PropID goal_id : goal_propositions) { const Proposition *goal = get_proposition(goal_id); int goal_cost = goal->cost; if (goal_cost == -1) return DEAD_END; total_cost = max(total_cost, goal_cost); } return total_cost; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis("Max heuristic", ""); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "supported"); parser.document_language_support( "axioms", "supported (in the sense that the planner won't complain -- " "handling of axioms might be very stupid " "and even render the heuristic unsafe)"); parser.document_property("admissible", "yes for tasks without axioms"); parser.document_property("consistent", "yes for tasks without axioms"); parser.document_property("safe", "yes for tasks without axioms"); 
parser.document_property("preferred operators", "no"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<HSPMaxHeuristic>(opts); } static Plugin<Evaluator> _plugin("hmax", _parse); }
4,398
C++
33.912698
77
0.645293
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/ff_heuristic.h
#ifndef HEURISTICS_FF_HEURISTIC_H #define HEURISTICS_FF_HEURISTIC_H #include "additive_heuristic.h" #include <vector> namespace ff_heuristic { using relaxation_heuristic::PropID; using relaxation_heuristic::OpID; using relaxation_heuristic::NO_OP; using relaxation_heuristic::Proposition; using relaxation_heuristic::UnaryOperator; /* TODO: In a better world, this should not derive from AdditiveHeuristic. Rather, the common parts should be implemented in a common base class. That refactoring could be made at the same time at which we also unify this with the other relaxation heuristics and the additional FF heuristic implementation in the landmark code. */ class FFHeuristic : public additive_heuristic::AdditiveHeuristic { // Relaxed plans are represented as a set of operators implemented // as a bit vector. using RelaxedPlan = std::vector<bool>; RelaxedPlan relaxed_plan; void mark_preferred_operators_and_relaxed_plan( const State &state, PropID goal_id); protected: virtual int compute_heuristic(const State &ancestor_state) override; public: explicit FFHeuristic(const options::Options &opts); }; } #endif
1,204
C
29.124999
72
0.745017
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/domain_transition_graph.h
#ifndef HEURISTICS_DOMAIN_TRANSITION_GRAPH_H #define HEURISTICS_DOMAIN_TRANSITION_GRAPH_H #include "../task_proxy.h" #include <cassert> #include <unordered_map> #include <vector> namespace cea_heuristic { class ContextEnhancedAdditiveHeuristic; } namespace cg_heuristic { class CGHeuristic; } namespace domain_transition_graph { struct LocalAssignment; struct ValueNode; struct ValueTransition; struct ValueTransitionLabel; class DomainTransitionGraph; // Note: We do not use references but pointers to refer to the "parents" of // transitions and nodes. This is because these structures could not be // put into vectors otherwise. class DTGFactory { using DTGs = std::vector<std::unique_ptr<DomainTransitionGraph>>; const TaskProxy &task_proxy; bool collect_transition_side_effects; std::function<bool(int, int)> pruning_condition; std::vector<utils::HashMap<std::pair<int, int>, int>> transition_index; std::vector<std::unordered_map<int, int>> global_to_local_var; void allocate_graphs_and_nodes(DTGs &dtgs); void initialize_index_structures(int num_dtgs); void create_transitions(DTGs &dtgs); void process_effect(const EffectProxy &eff, const OperatorProxy &op, DTGs &dtgs); void update_transition_condition(const FactProxy &fact, DomainTransitionGraph *dtg, std::vector<LocalAssignment> &condition); void extend_global_to_local_mapping_if_necessary( DomainTransitionGraph *dtg, int global_var); void revert_new_local_vars(DomainTransitionGraph *dtg, unsigned int first_local_var); ValueTransition *get_transition(int origin, int target, DomainTransitionGraph *dtg); void simplify_transitions(DTGs &dtgs); void simplify_labels(std::vector<ValueTransitionLabel> &labels); void collect_all_side_effects(DTGs &dtgs); void collect_side_effects(DomainTransitionGraph *dtg, std::vector<ValueTransitionLabel> &labels); OperatorProxy get_op_for_label(const ValueTransitionLabel &label); public: DTGFactory(const TaskProxy &task_proxy, bool collect_transition_side_effects, const std::function<bool(int, int)> &pruning_condition); DTGs build_dtgs(); }; struct LocalAssignment { short local_var; short value; LocalAssignment(int var, int val) : local_var(var), value(val) { // Check overflow. 
assert(local_var == var); assert(value == val); } }; struct ValueTransitionLabel { int op_id; bool is_axiom; std::vector<LocalAssignment> precond; std::vector<LocalAssignment> effect; ValueTransitionLabel(int op_id, bool axiom, const std::vector<LocalAssignment> &precond, const std::vector<LocalAssignment> &effect) : op_id(op_id), is_axiom(axiom), precond(precond), effect(effect) {} }; struct ValueTransition { ValueNode *target; std::vector<ValueTransitionLabel> labels; ValueTransition(ValueNode *targ) : target(targ) {} void simplify(const TaskProxy &task_proxy); }; struct ValueNode { DomainTransitionGraph *parent_graph; int value; std::vector<ValueTransition> transitions; std::vector<int> distances; std::vector<ValueTransitionLabel *> helpful_transitions; std::vector<int> children_state; ValueNode *reached_from; ValueTransitionLabel *reached_by; ValueNode(DomainTransitionGraph *parent, int val) : parent_graph(parent), value(val), reached_from(0), reached_by(0) {} }; class DomainTransitionGraph { friend class cg_heuristic::CGHeuristic; friend class cea_heuristic::ContextEnhancedAdditiveHeuristic; friend class DTGFactory; int var; std::vector<ValueNode> nodes; int last_helpful_transition_extraction_time; std::vector<int> local_to_global_child; // used for mapping variables in conditions to their global index // (only needed for initializing child_state for the start node?) DomainTransitionGraph(const DomainTransitionGraph &other); // copying forbidden public: DomainTransitionGraph(int var_index, int node_count); }; } #endif
4,319
C
30.304348
83
0.682102
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/cg_heuristic.h
#ifndef HEURISTICS_CG_HEURISTIC_H #define HEURISTICS_CG_HEURISTIC_H #include "../heuristic.h" #include "../algorithms/priority_queues.h" #include <memory> #include <string> #include <vector> namespace domain_transition_graph { class DomainTransitionGraph; struct ValueNode; } namespace cg_heuristic { class CGCache; class CGHeuristic : public Heuristic { using ValueNodeQueue = priority_queues::AdaptiveQueue<domain_transition_graph::ValueNode *>; std::vector<std::unique_ptr<ValueNodeQueue>> prio_queues; std::vector<std::unique_ptr<domain_transition_graph::DomainTransitionGraph>> transition_graphs; std::unique_ptr<CGCache> cache; int cache_hits; int cache_misses; int helpful_transition_extraction_counter; int min_action_cost; void setup_domain_transition_graphs(); int get_transition_cost( const State &state, domain_transition_graph::DomainTransitionGraph *dtg, int start_val, int goal_val); void mark_helpful_transitions( const State &state, domain_transition_graph::DomainTransitionGraph *dtg, int to); protected: virtual int compute_heuristic(const State &ancestor_state) override; public: explicit CGHeuristic(const options::Options &opts); ~CGHeuristic(); virtual bool dead_ends_are_reliable() const override; }; } #endif
1,361
C
24.698113
99
0.717855
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristics/cg_cache.h
#ifndef HEURISTICS_CG_CACHE_H #define HEURISTICS_CG_CACHE_H #include "../task_proxy.h" #include <vector> namespace domain_transition_graph { struct ValueTransitionLabel; } namespace cg_heuristic { class CGCache { TaskProxy task_proxy; std::vector<std::vector<int>> cache; std::vector<std::vector<domain_transition_graph::ValueTransitionLabel *>> helpful_transition_cache; std::vector<std::vector<int>> depends_on; int get_index(int var, const State &state, int from_val, int to_val) const; int compute_required_cache_size( int var_id, const std::vector<int> &depends_on, int max_cache_size) const; public: static const int NOT_COMPUTED = -2; CGCache(const TaskProxy &task_proxy, int max_cache_size); ~CGCache(); bool is_cached(int var) const { return !cache[var].empty(); } int lookup(int var, const State &state, int from_val, int to_val) const { return cache[var][get_index(var, state, from_val, to_val)]; } void store(int var, const State &state, int from_val, int to_val, int cost) { cache[var][get_index(var, state, from_val, to_val)] = cost; } domain_transition_graph::ValueTransitionLabel *lookup_helpful_transition( int var, const State &state, int from_val, int to_val) const { int index = get_index(var, state, from_val, to_val); return helpful_transition_cache[var][index]; } void store_helpful_transition( int var, const State &state, int from_val, int to_val, domain_transition_graph::ValueTransitionLabel *helpful_transition) { int index = get_index(var, state, from_val, to_val); helpful_transition_cache[var][index] = helpful_transition; } }; } #endif
1,759
C
29.877192
103
0.66174
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets.cc
#include "stubborn_sets.h" #include "../option_parser.h" #include "../task_utils/task_properties.h" #include "../utils/collections.h" #include "../utils/logging.h" #include <algorithm> #include <cassert> using namespace std; namespace stubborn_sets { // Relies on both fact sets being sorted by variable. bool contain_conflicting_fact(const vector<FactPair> &facts1, const vector<FactPair> &facts2) { auto facts1_it = facts1.begin(); auto facts2_it = facts2.begin(); while (facts1_it != facts1.end() && facts2_it != facts2.end()) { if (facts1_it->var < facts2_it->var) { ++facts1_it; } else if (facts1_it->var > facts2_it->var) { ++facts2_it; } else { if (facts1_it->value != facts2_it->value) return true; ++facts1_it; ++facts2_it; } } return false; } StubbornSets::StubbornSets(const options::Options &opts) : min_required_pruning_ratio(opts.get<double>("min_required_pruning_ratio")), num_expansions_before_checking_pruning_ratio( opts.get<int>("expansions_before_checking_pruning_ratio")), num_pruning_calls(0), is_pruning_disabled(false), timer(false) { } void StubbornSets::initialize(const shared_ptr<AbstractTask> &task) { PruningMethod::initialize(task); TaskProxy task_proxy(*task); task_properties::verify_no_axioms(task_proxy); task_properties::verify_no_conditional_effects(task_proxy); num_operators = task_proxy.get_operators().size(); num_unpruned_successors_generated = 0; num_pruned_successors_generated = 0; sorted_goals = utils::sorted<FactPair>( task_properties::get_fact_pairs(task_proxy.get_goals())); compute_sorted_operators(task_proxy); compute_achievers(task_proxy); } // Relies on op_preconds and op_effects being sorted by variable. bool StubbornSets::can_disable(int op1_no, int op2_no) const { return contain_conflicting_fact(sorted_op_effects[op1_no], sorted_op_preconditions[op2_no]); } // Relies on op_effect being sorted by variable. bool StubbornSets::can_conflict(int op1_no, int op2_no) const { return contain_conflicting_fact(sorted_op_effects[op1_no], sorted_op_effects[op2_no]); } void StubbornSets::compute_sorted_operators(const TaskProxy &task_proxy) { OperatorsProxy operators = task_proxy.get_operators(); sorted_op_preconditions = utils::map_vector<vector<FactPair>>( operators, [](const OperatorProxy &op) { return utils::sorted<FactPair>( task_properties::get_fact_pairs(op.get_preconditions())); }); sorted_op_effects = utils::map_vector<vector<FactPair>>( operators, [](const OperatorProxy &op) { return utils::sorted<FactPair>( utils::map_vector<FactPair>( op.get_effects(), [](const EffectProxy &eff) {return eff.get_fact().get_pair();})); }); } void StubbornSets::compute_achievers(const TaskProxy &task_proxy) { achievers = utils::map_vector<vector<vector<int>>>( task_proxy.get_variables(), [](const VariableProxy &var) { return vector<vector<int>>(var.get_domain_size()); }); for (const OperatorProxy op : task_proxy.get_operators()) { for (const EffectProxy effect : op.get_effects()) { FactPair fact = effect.get_fact().get_pair(); achievers[fact.var][fact.value].push_back(op.get_id()); } } } bool StubbornSets::mark_as_stubborn(int op_no) { if (!stubborn[op_no]) { stubborn[op_no] = true; stubborn_queue.push_back(op_no); return true; } return false; } void StubbornSets::prune_operators( const State &state, vector<OperatorID> &op_ids) { if (is_pruning_disabled) { return; } if (min_required_pruning_ratio > 0. && num_pruning_calls == num_expansions_before_checking_pruning_ratio) { double pruning_ratio = (num_unpruned_successors_generated == 0) ? 1. : 1. 
- ( static_cast<double>(num_pruned_successors_generated) / static_cast<double>(num_unpruned_successors_generated)); utils::g_log << "Pruning ratio after " << num_expansions_before_checking_pruning_ratio << " calls: " << pruning_ratio << endl; if (pruning_ratio < min_required_pruning_ratio) { utils::g_log << "-- pruning ratio is lower than minimum pruning ratio (" << min_required_pruning_ratio << ") -> switching off pruning" << endl; is_pruning_disabled = true; } } timer.resume(); num_unpruned_successors_generated += op_ids.size(); ++num_pruning_calls; // Clear stubborn set from previous call. stubborn.assign(num_operators, false); assert(stubborn_queue.empty()); initialize_stubborn_set(state); /* Iteratively insert operators to stubborn according to the definition of strong stubborn sets until a fixpoint is reached. */ while (!stubborn_queue.empty()) { int op_no = stubborn_queue.back(); stubborn_queue.pop_back(); handle_stubborn_operator(state, op_no); } // Now check which applicable operators are in the stubborn set. vector<OperatorID> remaining_op_ids; remaining_op_ids.reserve(op_ids.size()); for (OperatorID op_id : op_ids) { if (stubborn[op_id.get_index()]) { remaining_op_ids.emplace_back(op_id); } } op_ids.swap(remaining_op_ids); num_pruned_successors_generated += op_ids.size(); timer.stop(); } void StubbornSets::print_statistics() const { utils::g_log << "total successors before partial-order reduction: " << num_unpruned_successors_generated << endl << "total successors after partial-order reduction: " << num_pruned_successors_generated << endl; double pruning_ratio = (num_unpruned_successors_generated == 0) ? 1. : 1. - ( static_cast<double>(num_pruned_successors_generated) / static_cast<double>(num_unpruned_successors_generated)); utils::g_log << "Pruning ratio: " << pruning_ratio << endl; utils::g_log << "Time for pruning operators: " << timer << endl; } void add_pruning_options(options::OptionParser &parser) { parser.document_note( "Automatically disable pruning", "Using stubborn sets to prune operators often reduces the required" " number of expansions but computing the prunable operators has a" " non-negligible runtime overhead. Whether the decrease in expansions" " outweighs the increased computational costs depends on the task at" " hand. Using the options 'min_required_pruning_ratio' (M) and" " 'expansions_before_checking_pruning_ratio' (E) it is possible to" " automatically disable pruning after E expansions if the ratio of" " pruned vs. non-pruned operators is lower than M. In detail, let B and" " A be the total number of operators before and after pruning summed" " over all previous expansions. We call 1-(A/B) the pruning ratio R. If" " R is lower than M after E expansions, we disable pruning for all" " subsequent expansions, i.e., consider all applicable operators when" " generating successor states. By default, pruning is never disabled" " (min_required_pruning_ratio = 0.0). In experiments on IPC benchmarks," " stronger results have been observed with automatic disabling" " (min_required_pruning_ratio = 0.2," " expansions_before_checking_pruning_ratio=1000)."); parser.add_option<double>( "min_required_pruning_ratio", "disable pruning if the pruning ratio is lower than this value after" " 'expansions_before_checking_pruning_ratio' expansions", "0.0", Bounds("0.0", "1.0")); parser.add_option<int>( "expansions_before_checking_pruning_ratio", "number of expansions before deciding whether to disable pruning", "1000", Bounds("0", "infinity")); } }
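The automatic-disabling rule documented in add_pruning_options above reduces to a single ratio, R = 1 - A/B, with B and A the successor counts before and after pruning. The short sketch below only evaluates that formula for made-up counts to show on which side of a min_required_pruning_ratio of 0.2 a run would fall; it is not part of the pruning code.

// Pruning ratio as used above: R = 1 - A/B, where B and A are the numbers of
// successors generated before and after pruning, summed over all expansions.
#include <iostream>

double pruning_ratio(long long unpruned, long long pruned) {
    if (unpruned == 0)
        return 1.0;
    return 1.0 - static_cast<double>(pruned) / static_cast<double>(unpruned);
}

int main() {
    // Hypothetical totals after the first 1000 expansions.
    std::cout << pruning_ratio(10000, 9500) << std::endl;   // 0.05
    std::cout << pruning_ratio(10000, 6000) << std::endl;   // 0.4
    // With min_required_pruning_ratio = 0.2, the first run would switch
    // pruning off at the check, while the second would keep it enabled.
    return 0;
}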
8,254
C++
38.309524
95
0.631451
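The merge scan at the core of contain_conflicting_fact() above works on any two fact sets that are sorted by variable. A self-contained sketch with a local Fact struct (a stand-in for the planner's FactPair, not the repository's type):

// Standalone sketch of the sorted merge scan used by contain_conflicting_fact().
#include <iostream>
#include <vector>

struct Fact {
    int var;
    int value;
};

// Both inputs must be sorted by variable, mirroring sorted_op_preconditions
// and sorted_op_effects in the pruning code.
bool contain_conflicting_fact(const std::vector<Fact> &a,
                              const std::vector<Fact> &b) {
    auto it_a = a.begin();
    auto it_b = b.begin();
    while (it_a != a.end() && it_b != b.end()) {
        if (it_a->var < it_b->var) {
            ++it_a;
        } else if (it_a->var > it_b->var) {
            ++it_b;
        } else {
            if (it_a->value != it_b->value)
                return true;  // same variable, different value -> conflict
            ++it_a;
            ++it_b;
        }
    }
    return false;
}

int main() {
    std::vector<Fact> effects = {{0, 1}, {2, 0}};
    std::vector<Fact> preconditions = {{2, 1}, {3, 0}};
    // Variable 2 is set to 0 by the first operator but required to be 1 by the
    // second one, so the first operator can disable the second.
    std::cout << std::boolalpha
              << contain_conflicting_fact(effects, preconditions) << '\n';  // true
    return 0;
}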
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets_atom_centric.h
#ifndef PRUNING_STUBBORN_SETS_ATOM_CENTRIC_H #define PRUNING_STUBBORN_SETS_ATOM_CENTRIC_H #include "stubborn_sets.h" namespace stubborn_sets_atom_centric { static const int MARKED_VALUES_NONE = -2; static const int MARKED_VALUES_ALL = -1; // See the .cc file for an explanation of the strategies. enum class AtomSelectionStrategy { FAST_DOWNWARD, QUICK_SKIP, STATIC_SMALL, DYNAMIC_SMALL }; class StubbornSetsAtomCentric : public stubborn_sets::StubbornSets { const bool use_sibling_shortcut; const AtomSelectionStrategy atom_selection_strategy; // consumers[v][d] contains the ID of operator o if pre(o) contains the fact v=d. std::vector<std::vector<std::vector<int>>> consumers; /* Marked producer and consumer facts. marked_{producers,consumers}[v][d] is true iff fact v=d is marked. */ std::vector<std::vector<bool>> marked_producers; std::vector<std::vector<bool>> marked_consumers; /* Data structures for shortcut handling of siblings. marked_*_variables[v] = d iff all sibling facts v=d' with d'!=d are marked marked_*_variables[v] = MARKED_VALUES_ALL iff all facts for v are marked marked_*_variables[v] = MARKED_VALUES_NONE iff we have no such information */ std::vector<int> marked_producer_variables; std::vector<int> marked_consumer_variables; std::vector<FactPair> producer_queue; std::vector<FactPair> consumer_queue; void compute_consumers(const TaskProxy &task_proxy); bool operator_is_applicable(int op, const State &state) const; void enqueue_producers(const FactPair &fact); void enqueue_consumers(const FactPair &fact); void enqueue_sibling_consumers(const FactPair &fact); void enqueue_sibling_producers(const FactPair &fact); FactPair select_fact(const std::vector<FactPair> &facts, const State &state) const; void enqueue_nes(int op, const State &state); void enqueue_interferers(int op); protected: virtual void initialize_stubborn_set(const State &state) override; virtual void handle_stubborn_operator(const State &state, int op) override; public: explicit StubbornSetsAtomCentric(const options::Options &opts); virtual void initialize(const std::shared_ptr<AbstractTask> &task) override; }; } #endif
2,296
C
36.048387
87
0.721254
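The variable-level marking described in the comments above (MARKED_VALUES_NONE / MARKED_VALUES_ALL) avoids touching every sibling fact of v=d more than once. A minimal sketch of that bookkeeping, assuming a plain queue of facts and a local Fact struct rather than the planner's types:

// Sketch of the sibling shortcut: a per-variable mark records whether no value,
// exactly one value, or all values of the variable have been handled.
#include <iostream>
#include <vector>

static const int MARKED_VALUES_NONE = -2;
static const int MARKED_VALUES_ALL = -1;

struct Fact {
    int var;
    int value;
};

void enqueue_sibling_facts(const Fact &fact, int domain_size,
                           std::vector<int> &marked_variables,
                           std::vector<Fact> &queue) {
    int &mark = marked_variables[fact.var];
    if (mark == MARKED_VALUES_NONE) {
        // Nothing known about this variable yet: enqueue all siblings of fact.value.
        for (int value = 0; value < domain_size; ++value)
            if (value != fact.value)
                queue.push_back({fact.var, value});
        mark = fact.value;
    } else if (mark != MARKED_VALUES_ALL && mark != fact.value) {
        // Exactly one value (mark) is still missing; enqueue it and we are done.
        queue.push_back({fact.var, mark});
        mark = MARKED_VALUES_ALL;
    }
    // mark == MARKED_VALUES_ALL or mark == fact.value: nothing left to enqueue.
}

int main() {
    std::vector<int> marked_variables(1, MARKED_VALUES_NONE);
    std::vector<Fact> queue;
    enqueue_sibling_facts({0, 1}, 4, marked_variables, queue);  // enqueues values 0, 2, 3
    enqueue_sibling_facts({0, 2}, 4, marked_variables, queue);  // enqueues value 1 only
    enqueue_sibling_facts({0, 3}, 4, marked_variables, queue);  // enqueues nothing
    for (const Fact &f : queue)
        std::cout << "v" << f.var << "=" << f.value << '\n';
    return 0;
}

The second call only needs to enqueue the single value that the first call skipped, which is exactly the information the per-variable mark keeps.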
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets_ec.cc
#include "stubborn_sets_ec.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/collections.h" #include "../utils/logging.h" #include "../utils/markup.h" #include <cassert> #include <unordered_map> using namespace std; namespace stubborn_sets_ec { // DTGs are stored as one adjacency list per value. using StubbornDTG = vector<vector<int>>; static inline bool is_v_applicable(int var, int op_no, const State &state, vector<vector<int>> &preconditions) { int precondition_on_var = preconditions[op_no][var]; return precondition_on_var == -1 || precondition_on_var == state[var].get_value(); } vector<StubbornDTG> build_dtgs(TaskProxy task_proxy) { /* NOTE: Code lifted and adapted from M&S atomic abstraction code. We need a more general mechanism for creating data structures of this kind. */ /* NOTE: for stubborn sets ec, the DTG for v *does* include self-loops from d to d if there is an operator that sets the value of v to d and has no precondition on v. This is different from the usual DTG definition. */ // Create the empty DTG nodes. vector<StubbornDTG> dtgs = utils::map_vector<StubbornDTG>( task_proxy.get_variables(), [](const VariableProxy &var) { return StubbornDTG(var.get_domain_size()); }); // Add DTG arcs. for (OperatorProxy op : task_proxy.get_operators()) { unordered_map<int, int> preconditions; for (FactProxy pre : op.get_preconditions()) { preconditions[pre.get_variable().get_id()] = pre.get_value(); } for (EffectProxy effect : op.get_effects()) { FactProxy fact = effect.get_fact(); VariableProxy var = fact.get_variable(); int var_id = var.get_id(); int eff_val = fact.get_value(); int pre_val = utils::get_value_or_default(preconditions, var_id, -1); StubbornDTG &dtg = dtgs[var_id]; if (pre_val == -1) { int num_values = var.get_domain_size(); for (int value = 0; value < num_values; ++value) { dtg[value].push_back(eff_val); } } else { dtg[pre_val].push_back(eff_val); } } } return dtgs; } void recurse_forwards(const StubbornDTG &dtg, int start_value, int current_value, vector<bool> &reachable) { if (!reachable[current_value]) { reachable[current_value] = true; for (int successor_value : dtg[current_value]) recurse_forwards(dtg, start_value, successor_value, reachable); } } // Relies on both fact sets being sorted by variable. 
void get_conflicting_vars(const vector<FactPair> &facts1, const vector<FactPair> &facts2, vector<int> &conflicting_vars) { conflicting_vars.clear(); auto facts1_it = facts1.begin(); auto facts2_it = facts2.begin(); while (facts1_it != facts1.end() && facts2_it != facts2.end()) { if (facts1_it->var < facts2_it->var) { ++facts1_it; } else if (facts1_it->var > facts2_it->var) { ++facts2_it; } else { if (facts2_it->value != facts1_it->value) { conflicting_vars.push_back(facts2_it->var); } ++facts1_it; ++facts2_it; } } } StubbornSetsEC::StubbornSetsEC(const options::Options &opts) : StubbornSets(opts) { } void StubbornSetsEC::initialize(const shared_ptr<AbstractTask> &task) { StubbornSets::initialize(task); TaskProxy task_proxy(*task); VariablesProxy variables = task_proxy.get_variables(); written_vars.assign(variables.size(), false); nes_computed = utils::map_vector<vector<bool>>( variables, [](const VariableProxy &var) { return vector<bool>(var.get_domain_size(), false); }); active_ops.assign(num_operators, false); compute_operator_preconditions(task_proxy); build_reachability_map(task_proxy); conflicting_and_disabling.resize(num_operators); conflicting_and_disabling_computed.resize(num_operators, false); disabled.resize(num_operators); disabled_computed.resize(num_operators, false); utils::g_log << "pruning method: stubborn sets ec" << endl; } void StubbornSetsEC::compute_operator_preconditions(const TaskProxy &task_proxy) { int num_variables = task_proxy.get_variables().size(); op_preconditions_on_var = utils::map_vector<vector<int>>( task_proxy.get_operators(), [&](const OperatorProxy &op) { vector<int> preconditions_on_var(num_variables, -1); for (FactProxy precondition : op.get_preconditions()) { FactPair fact = precondition.get_pair(); preconditions_on_var[fact.var] = fact.value; } return preconditions_on_var; }); } void StubbornSetsEC::build_reachability_map(const TaskProxy &task_proxy) { vector<StubbornDTG> dtgs = build_dtgs(task_proxy); reachability_map = utils::map_vector<vector<vector<bool>>>( task_proxy.get_variables(), [&](const VariableProxy &var) { StubbornDTG &dtg = dtgs[var.get_id()]; int num_values = var.get_domain_size(); vector<vector<bool>> var_reachability_map(num_values); for (int start_value = 0; start_value < num_values; ++start_value) { vector<bool> &reachable = var_reachability_map[start_value]; reachable.assign(num_values, false); recurse_forwards(dtg, start_value, start_value, reachable); } return var_reachability_map; }); } void StubbornSetsEC::compute_active_operators(const State &state) { active_ops.assign(active_ops.size(), false); for (int op_no = 0; op_no < num_operators; ++op_no) { bool all_preconditions_are_active = true; for (const FactPair &precondition : sorted_op_preconditions[op_no]) { int var_id = precondition.var; int current_value = state[var_id].get_value(); const vector<bool> &reachable_values = reachability_map[var_id][current_value]; if (!reachable_values[precondition.value]) { all_preconditions_are_active = false; break; } } if (all_preconditions_are_active) { active_ops[op_no] = true; } } } const vector<int> &StubbornSetsEC::get_conflicting_and_disabling(int op1_no) { vector<int> &result = conflicting_and_disabling[op1_no]; if (!conflicting_and_disabling_computed[op1_no]) { for (int op2_no = 0; op2_no < num_operators; ++op2_no) { if (op1_no != op2_no) { bool conflict = can_conflict(op1_no, op2_no); bool disable = can_disable(op2_no, op1_no); if (conflict || disable) { result.push_back(op2_no); } } } result.shrink_to_fit(); 
conflicting_and_disabling_computed[op1_no] = true; } return result; } const vector<int> &StubbornSetsEC::get_disabled(int op1_no) { vector<int> &result = disabled[op1_no]; if (!disabled_computed[op1_no]) { for (int op2_no = 0; op2_no < num_operators; ++op2_no) { if (op2_no != op1_no && can_disable(op1_no, op2_no)) { result.push_back(op2_no); } } result.shrink_to_fit(); disabled_computed[op1_no] = true; } return result; } bool StubbornSetsEC::is_applicable(int op_no, const State &state) const { return find_unsatisfied_precondition(op_no, state) == FactPair::no_fact; } // TODO: find a better name. void StubbornSetsEC::mark_as_stubborn_and_remember_written_vars( int op_no, const State &state) { if (mark_as_stubborn(op_no)) { if (is_applicable(op_no, state)) { for (const FactPair &effect : sorted_op_effects[op_no]) written_vars[effect.var] = true; } } } /* TODO: think about a better name, which distinguishes this method better from the corresponding method for simple stubborn sets */ void StubbornSetsEC::add_nes_for_fact(const FactPair &fact, const State &state) { for (int achiever : achievers[fact.var][fact.value]) { if (active_ops[achiever]) { mark_as_stubborn_and_remember_written_vars(achiever, state); } } nes_computed[fact.var][fact.value] = true; } void StubbornSetsEC::add_conflicting_and_disabling(int op_no, const State &state) { for (int conflict : get_conflicting_and_disabling(op_no)) { if (active_ops[conflict]) { mark_as_stubborn_and_remember_written_vars(conflict, state); } } } // Relies on op_effects and op_preconditions being sorted by variable. void StubbornSetsEC::get_disabled_vars( int op1_no, int op2_no, vector<int> &disabled_vars) const { get_conflicting_vars(sorted_op_effects[op1_no], sorted_op_preconditions[op2_no], disabled_vars); } void StubbornSetsEC::apply_s5(int op_no, const State &state) { // Find a violated state variable and check if stubborn contains a writer for this variable. 
for (const FactPair &pre : sorted_op_preconditions[op_no]) { if (state[pre.var].get_value() != pre.value && written_vars[pre.var]) { if (!nes_computed[pre.var][pre.value]) { add_nes_for_fact(pre, state); } return; } } FactPair violated_precondition = find_unsatisfied_precondition(op_no, state); assert(violated_precondition != FactPair::no_fact); if (!nes_computed[violated_precondition.var][violated_precondition.value]) { add_nes_for_fact(violated_precondition, state); } } void StubbornSetsEC::initialize_stubborn_set(const State &state) { for (vector<bool> &by_value : nes_computed) { by_value.assign(by_value.size(), false); } written_vars.assign(written_vars.size(), false); compute_active_operators(state); //rule S1 FactPair unsatisfied_goal = find_unsatisfied_goal(state); assert(unsatisfied_goal != FactPair::no_fact); add_nes_for_fact(unsatisfied_goal, state); // active operators used } void StubbornSetsEC::handle_stubborn_operator(const State &state, int op_no) { if (is_applicable(op_no, state)) { //Rule S2 & S3 add_conflicting_and_disabling(op_no, state); // active operators used //Rule S4' vector<int> disabled_vars; for (int disabled_op_no : get_disabled(op_no)) { if (active_ops[disabled_op_no]) { get_disabled_vars(op_no, disabled_op_no, disabled_vars); if (!disabled_vars.empty()) { // == can_disable(op1_no, op2_no) bool v_applicable_op_found = false; for (int disabled_var : disabled_vars) { //First case: add o' if (is_v_applicable(disabled_var, disabled_op_no, state, op_preconditions_on_var)) { mark_as_stubborn_and_remember_written_vars( disabled_op_no, state); v_applicable_op_found = true; break; } } //Second case: add a necessary enabling set for o' following S5 if (!v_applicable_op_found) { apply_s5(disabled_op_no, state); } } } } } else { // op is inapplicable //S5 apply_s5(op_no, state); } } static shared_ptr<PruningMethod> _parse(OptionParser &parser) { parser.document_synopsis( "StubbornSetsEC", "Stubborn sets represent a state pruning method which computes a subset " "of applicable operators in each state such that completeness and " "optimality of the overall search is preserved. As stubborn sets rely " "on several design choices, there are different variants thereof. " "The variant 'StubbornSetsEC' resolves the design choices such that " "the resulting pruning method is guaranteed to strictly dominate the " "Expansion Core pruning method. For details, see" + utils::format_conference_reference( {"Martin Wehrle", "Malte Helmert", "Yusra Alkhazraji", "Robert Mattmueller"}, "The Relative Pruning Power of Strong Stubborn Sets and Expansion Core", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS13/paper/view/6053/6185", "Proceedings of the 23rd International Conference on Automated Planning " "and Scheduling (ICAPS 2013)", "251-259", "AAAI Press", "2013")); stubborn_sets::add_pruning_options(parser); Options opts = parser.parse(); if (parser.dry_run()) { return nullptr; } return make_shared<StubbornSetsEC>(opts); } static Plugin<PruningMethod> _plugin("stubborn_sets_ec", _parse); }
13,578
C++
36.614958
96
0.582486
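build_dtgs() and recurse_forwards() above compute, per variable, which values are forward-reachable in the domain transition graph; stubborn sets EC later uses this to decide which operators are active. A minimal sketch of the reachability part, with a hand-built adjacency list instead of the task's operators (the names here are illustrative, not the repository's):

// One DTG per variable: dtg[d] lists the values reachable from d in one step;
// a DFS marks everything reachable from a start value.
#include <iostream>
#include <vector>

using StubbornDTG = std::vector<std::vector<int>>;  // adjacency list per value

void recurse_forwards(const StubbornDTG &dtg, int current_value,
                      std::vector<bool> &reachable) {
    if (!reachable[current_value]) {
        reachable[current_value] = true;
        for (int successor : dtg[current_value])
            recurse_forwards(dtg, successor, reachable);
    }
}

int main() {
    // Domain {0,1,2,3}; arcs 0->1, 1->2, 3->0 (value 2 has no outgoing arc).
    StubbornDTG dtg = {{1}, {2}, {}, {0}};
    std::vector<bool> reachable(dtg.size(), false);
    recurse_forwards(dtg, 0, reachable);
    for (size_t value = 0; value < reachable.size(); ++value)
        std::cout << "value " << value
                  << (reachable[value] ? " reachable" : " unreachable") << '\n';
    return 0;
}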
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets_atom_centric.cc
#include "stubborn_sets_atom_centric.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/logging.h" #include "../utils/markup.h" #include "../utils/memory.h" using namespace std; namespace stubborn_sets_atom_centric { StubbornSetsAtomCentric::StubbornSetsAtomCentric(const options::Options &opts) : StubbornSets(opts), use_sibling_shortcut(opts.get<bool>("use_sibling_shortcut")), atom_selection_strategy(opts.get<AtomSelectionStrategy>("atom_selection_strategy")) { } void StubbornSetsAtomCentric::initialize(const shared_ptr<AbstractTask> &task) { StubbornSets::initialize(task); utils::g_log << "pruning method: atom-centric stubborn sets" << endl; TaskProxy task_proxy(*task); int num_variables = task_proxy.get_variables().size(); marked_producers.reserve(num_variables); marked_consumers.reserve(num_variables); for (VariableProxy var : task_proxy.get_variables()) { marked_producers.emplace_back(var.get_domain_size(), false); marked_consumers.emplace_back(var.get_domain_size(), false); } if (use_sibling_shortcut) { marked_producer_variables.resize(num_variables, MARKED_VALUES_NONE); marked_consumer_variables.resize(num_variables, MARKED_VALUES_NONE); } compute_consumers(task_proxy); } void StubbornSetsAtomCentric::compute_consumers(const TaskProxy &task_proxy) { consumers.reserve(task_proxy.get_variables().size()); for (VariableProxy var : task_proxy.get_variables()) { consumers.emplace_back(var.get_domain_size()); } for (OperatorProxy op : task_proxy.get_operators()) { int op_id = op.get_id(); for (FactProxy fact_proxy : op.get_preconditions()) { FactPair fact = fact_proxy.get_pair(); consumers[fact.var][fact.value].push_back(op_id); } } for (auto &outer : consumers) { for (auto &inner : outer) { inner.shrink_to_fit(); } } } bool StubbornSetsAtomCentric::operator_is_applicable(int op, const State &state) const { return find_unsatisfied_precondition(op, state) == FactPair::no_fact; } void StubbornSetsAtomCentric::enqueue_producers(const FactPair &fact) { if (!marked_producers[fact.var][fact.value]) { marked_producers[fact.var][fact.value] = true; producer_queue.push_back(fact); } } void StubbornSetsAtomCentric::enqueue_consumers(const FactPair &fact) { if (!marked_consumers[fact.var][fact.value]) { marked_consumers[fact.var][fact.value] = true; consumer_queue.push_back(fact); } } void StubbornSetsAtomCentric::enqueue_sibling_producers(const FactPair &fact) { /* If we don't use the sibling shortcut handling, we ignore any variable-based marking info and always enqueue all sibling facts of the given fact v=d. */ int dummy_mark = MARKED_VALUES_NONE; int &mark = use_sibling_shortcut ? marked_producer_variables[fact.var] : dummy_mark; if (mark == MARKED_VALUES_NONE) { /* If we don't have marking info for variable v, enqueue all sibling producers of v=d and remember that we marked all siblings. */ int domain_size = consumers[fact.var].size(); for (int value = 0; value < domain_size; ++value) { if (value != fact.value) { enqueue_producers(FactPair(fact.var, value)); } } mark = fact.value; } else if (mark != MARKED_VALUES_ALL && mark != fact.value) { /* Exactly one fact v=d' has not been enqueued. It is therefore the only sibling of v=d that we need to enqueue. */ enqueue_producers(FactPair(fact.var, mark)); mark = MARKED_VALUES_ALL; } } void StubbornSetsAtomCentric::enqueue_sibling_consumers(const FactPair &fact) { // For documentation, see enqueue_sibling_producers(). int dummy_mark = MARKED_VALUES_NONE; int &mark = use_sibling_shortcut ? 
marked_consumer_variables[fact.var] : dummy_mark; if (mark == MARKED_VALUES_NONE) { int domain_size = consumers[fact.var].size(); for (int value = 0; value < domain_size; ++value) { if (value != fact.value) { enqueue_consumers(FactPair(fact.var, value)); } } mark = fact.value; } else if (mark != MARKED_VALUES_ALL && mark != fact.value) { enqueue_consumers(FactPair(fact.var, mark)); mark = MARKED_VALUES_ALL; } } FactPair StubbornSetsAtomCentric::select_fact( const vector<FactPair> &facts, const State &state) const { FactPair fact = FactPair::no_fact; if (atom_selection_strategy == AtomSelectionStrategy::FAST_DOWNWARD) { fact = stubborn_sets::find_unsatisfied_condition(facts, state); } else if (atom_selection_strategy == AtomSelectionStrategy::QUICK_SKIP) { /* If there is an unsatisfied fact whose producers are already marked, choose it. Otherwise, choose the first unsatisfied fact. */ for (const FactPair &condition : facts) { if (state[condition.var].get_value() != condition.value) { if (marked_producers[condition.var][condition.value]) { fact = condition; break; } else if (fact == FactPair::no_fact) { fact = condition; } } } } else if (atom_selection_strategy == AtomSelectionStrategy::STATIC_SMALL) { int min_count = numeric_limits<int>::max(); for (const FactPair &condition : facts) { if (state[condition.var].get_value() != condition.value) { int count = achievers[condition.var][condition.value].size(); if (count < min_count) { fact = condition; min_count = count; } } } } else if (atom_selection_strategy == AtomSelectionStrategy::DYNAMIC_SMALL) { int min_count = numeric_limits<int>::max(); for (const FactPair &condition : facts) { if (state[condition.var].get_value() != condition.value) { const vector<int> &ops = achievers[condition.var][condition.value]; int count = count_if( ops.begin(), ops.end(), [&](int op) {return !stubborn[op];}); if (count < min_count) { fact = condition; min_count = count; } } } } else { ABORT("Unknown atom selection strategy"); } assert(fact != FactPair::no_fact); return fact; } void StubbornSetsAtomCentric::enqueue_nes(int op, const State &state) { FactPair fact = select_fact(sorted_op_preconditions[op], state); enqueue_producers(fact); } void StubbornSetsAtomCentric::enqueue_interferers(int op) { for (const FactPair &fact : sorted_op_preconditions[op]) { // Enqueue operators that disable op. enqueue_sibling_producers(fact); } for (const FactPair &fact : sorted_op_effects[op]) { // Enqueue operators that conflict with op. enqueue_sibling_producers(fact); // Enqueue operators that op disables. enqueue_sibling_consumers(fact); } } void StubbornSetsAtomCentric::initialize_stubborn_set(const State &state) { assert(producer_queue.empty()); assert(consumer_queue.empty()); // Reset data structures from previous call. 
for (auto &facts : marked_producers) { facts.assign(facts.size(), false); } for (auto &facts : marked_consumers) { facts.assign(facts.size(), false); } if (use_sibling_shortcut) { int num_variables = state.size(); marked_producer_variables.assign(num_variables, MARKED_VALUES_NONE); marked_consumer_variables.assign(num_variables, MARKED_VALUES_NONE); } FactPair unsatisfied_goal = select_fact(sorted_goals, state); assert(unsatisfied_goal != FactPair::no_fact); enqueue_producers(unsatisfied_goal); while (!producer_queue.empty() || !consumer_queue.empty()) { if (!producer_queue.empty()) { FactPair fact = producer_queue.back(); producer_queue.pop_back(); for (int op : achievers[fact.var][fact.value]) { handle_stubborn_operator(state, op); } } else { FactPair fact = consumer_queue.back(); consumer_queue.pop_back(); for (int op : consumers[fact.var][fact.value]) { handle_stubborn_operator(state, op); } } } } void StubbornSetsAtomCentric::handle_stubborn_operator(const State &state, int op) { if (!stubborn[op]) { stubborn[op] = true; if (operator_is_applicable(op, state)) { enqueue_interferers(op); } else { enqueue_nes(op, state); } } } static shared_ptr<PruningMethod> _parse(OptionParser &parser) { parser.document_synopsis( "Atom-centric stubborn sets", "Stubborn sets are a state pruning method which computes a subset " "of applicable actions in each state such that completeness and " "optimality of the overall search is preserved. Previous stubborn set " "implementations mainly track information about actions. In contrast, " "this implementation focuses on atomic propositions (atoms), which " "often speeds up the computation on IPC benchmarks. For details, see" + utils::format_conference_reference( {"Gabriele Roeger", "Malte Helmert", "Jendrik Seipp", "Silvan Sievers"}, "An Atom-Centric Perspective on Stubborn Sets", "https://ai.dmi.unibas.ch/papers/roeger-et-al-socs2020.pdf", "Proceedings of the 13th Annual Symposium on Combinatorial Search " "(SoCS 2020)", "57-65", "AAAI Press", "2020")); parser.add_option<bool>( "use_sibling_shortcut", "use variable-based marking in addition to atom-based marking", "true"); vector<string> strategies; vector<string> strategies_docs; strategies.push_back("fast_downward"); strategies_docs.push_back( "select the atom (v, d) with the variable v that comes first in the Fast " "Downward variable ordering (which is based on the causal graph)"); strategies.push_back("quick_skip"); strategies_docs.push_back( "if possible, select an unsatisfied atom whose producers are already marked"); strategies.push_back("static_small"); strategies_docs.push_back("select the atom achieved by the fewest number of actions"); strategies.push_back("dynamic_small"); strategies_docs.push_back( "select the atom achieved by the fewest number of actions that are not " "yet part of the stubborn set"); parser.add_enum_option<AtomSelectionStrategy>( "atom_selection_strategy", strategies, "Strategy for selecting unsatisfied atoms from action preconditions or " "the goal atoms. All strategies use the fast_downward strategy for " "breaking ties.", "quick_skip", strategies_docs); stubborn_sets::add_pruning_options(parser); Options opts = parser.parse(); if (parser.dry_run()) { return nullptr; } return make_shared<StubbornSetsAtomCentric>(opts); } static Plugin<PruningMethod> _plugin("atom_centric_stubborn_sets", _parse); }
11,574
C++
36.95082
91
0.623034
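The static_small strategy documented above picks, among the unsatisfied conditions, the one achieved by the fewest operators. A compact sketch under simplified assumptions (a plain int state vector, a precomputed table of achiever counts, and a local Fact struct instead of the planner's types):

// Pick the unsatisfied condition with the fewest achievers.
#include <iostream>
#include <limits>
#include <vector>

struct Fact {
    int var;
    int value;
};

Fact select_static_small(const std::vector<Fact> &conditions,
                         const std::vector<int> &state,
                         const std::vector<std::vector<int>> &num_achievers) {
    Fact best{-1, -1};
    int min_count = std::numeric_limits<int>::max();
    for (const Fact &condition : conditions) {
        if (state[condition.var] != condition.value) {  // condition unsatisfied
            int count = num_achievers[condition.var][condition.value];
            if (count < min_count) {
                best = condition;
                min_count = count;
            }
        }
    }
    return best;  // {-1, -1} means every condition was satisfied
}

int main() {
    std::vector<int> state = {0, 0};                    // both variables have value 0
    std::vector<std::vector<int>> num_achievers = {{5, 3}, {2, 1}};
    std::vector<Fact> conditions = {{0, 1}, {1, 1}};    // both unsatisfied
    Fact picked = select_static_small(conditions, state, num_achievers);
    std::cout << "picked v" << picked.var << "=" << picked.value << '\n';  // v1=1 (1 achiever)
    return 0;
}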
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets_ec.h
#ifndef PRUNING_STUBBORN_SETS_EC_H #define PRUNING_STUBBORN_SETS_EC_H #include "stubborn_sets.h" namespace stubborn_sets_ec { class StubbornSetsEC : public stubborn_sets::StubbornSets { private: std::vector<std::vector<std::vector<bool>>> reachability_map; std::vector<std::vector<int>> op_preconditions_on_var; std::vector<bool> active_ops; std::vector<std::vector<int>> conflicting_and_disabling; std::vector<bool> conflicting_and_disabling_computed; std::vector<std::vector<int>> disabled; std::vector<bool> disabled_computed; std::vector<bool> written_vars; std::vector<std::vector<bool>> nes_computed; bool is_applicable(int op_no, const State &state) const; void get_disabled_vars(int op1_no, int op2_no, std::vector<int> &disabled_vars) const; void build_reachability_map(const TaskProxy &task_proxy); void compute_operator_preconditions(const TaskProxy &task_proxy); const std::vector<int> &get_conflicting_and_disabling(int op1_no); const std::vector<int> &get_disabled(int op1_no); void add_conflicting_and_disabling(int op_no, const State &state); void compute_active_operators(const State &state); void mark_as_stubborn_and_remember_written_vars(int op_no, const State &state); void add_nes_for_fact(const FactPair &fact, const State &state); void apply_s5(int op_no, const State &state); protected: virtual void initialize_stubborn_set(const State &state) override; virtual void handle_stubborn_operator(const State &state, int op_no) override; public: virtual void initialize(const std::shared_ptr<AbstractTask> &task) override; explicit StubbornSetsEC(const options::Options &opts); }; } #endif
1,737
C
41.390243
83
0.716753
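compute_active_operators() declared above treats an operator as active only if every precondition value is forward-reachable in its variable's DTG from the value the current state assigns. A small sketch of that test, using a hand-written reachability map in place of the one built by build_reachability_map() (the types and data are illustrative stand-ins):

#include <iostream>
#include <vector>

struct Fact {
    int var;
    int value;
};

// reachability_map[var][from_value][to_value] is true iff to_value is reachable
// from from_value in the DTG of var.
bool is_active(const std::vector<Fact> &preconditions,
               const std::vector<int> &state,
               const std::vector<std::vector<std::vector<bool>>> &reachability_map) {
    for (const Fact &pre : preconditions) {
        int current_value = state[pre.var];
        if (!reachability_map[pre.var][current_value][pre.value])
            return false;
    }
    return true;
}

int main() {
    // One variable with domain {0,1,2}: 0 -> 1 -> 2, nothing leads back to 0.
    std::vector<std::vector<std::vector<bool>>> reachability_map = {{
        {true, true, true},    // from 0
        {false, true, true},   // from 1
        {false, false, true},  // from 2
    }};
    std::vector<int> state = {1};
    std::cout << is_active({{0, 2}}, state, reachability_map) << '\n';  // 1: value 2 reachable from 1
    std::cout << is_active({{0, 0}}, state, reachability_map) << '\n';  // 0: value 0 not reachable from 1
    return 0;
}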
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets_simple.h
#ifndef PRUNING_STUBBORN_SETS_SIMPLE_H #define PRUNING_STUBBORN_SETS_SIMPLE_H #include "stubborn_sets.h" namespace stubborn_sets_simple { /* Implementation of simple instantiation of strong stubborn sets. Disjunctive action landmarks are computed trivially.*/ class StubbornSetsSimple : public stubborn_sets::StubbornSets { /* interference_relation[op1_no] contains all operator indices of operators that interfere with op1. */ std::vector<std::vector<int>> interference_relation; std::vector<bool> interference_relation_computed; void add_necessary_enabling_set(const FactPair &fact); void add_interfering(int op_no); inline bool interfere(int op1_no, int op2_no) { return can_disable(op1_no, op2_no) || can_conflict(op1_no, op2_no) || can_disable(op2_no, op1_no); } const std::vector<int> &get_interfering_operators(int op1_no); protected: virtual void initialize_stubborn_set(const State &state) override; virtual void handle_stubborn_operator(const State &state, int op_no) override; public: explicit StubbornSetsSimple(const options::Options &opts); virtual void initialize(const std::shared_ptr<AbstractTask> &task) override; }; } #endif
1,288
C
34.805555
80
0.696429
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets_simple.cc
#include "stubborn_sets_simple.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/logging.h" #include "../utils/markup.h" using namespace std; namespace stubborn_sets_simple { StubbornSetsSimple::StubbornSetsSimple(const options::Options &opts) : StubbornSets(opts) { } void StubbornSetsSimple::initialize(const shared_ptr<AbstractTask> &task) { StubbornSets::initialize(task); interference_relation.resize(num_operators); interference_relation_computed.resize(num_operators, false); utils::g_log << "pruning method: stubborn sets simple" << endl; } const vector<int> &StubbornSetsSimple::get_interfering_operators(int op1_no) { /* TODO: as interference is symmetric, we only need to compute the relation for operators (o1, o2) with (o1 < o2) and add a lookup method that looks up (i, j) if i < j and (j, i) otherwise. */ vector<int> &interfere_op1 = interference_relation[op1_no]; if (!interference_relation_computed[op1_no]) { for (int op2_no = 0; op2_no < num_operators; ++op2_no) { if (op1_no != op2_no && interfere(op1_no, op2_no)) { interfere_op1.push_back(op2_no); } } interfere_op1.shrink_to_fit(); interference_relation_computed[op1_no] = true; } return interfere_op1; } // Add all operators that achieve the fact (var, value) to stubborn set. void StubbornSetsSimple::add_necessary_enabling_set(const FactPair &fact) { for (int op_no : achievers[fact.var][fact.value]) { mark_as_stubborn(op_no); } } // Add all operators that interfere with op. void StubbornSetsSimple::add_interfering(int op_no) { for (int interferer_no : get_interfering_operators(op_no)) { mark_as_stubborn(interferer_no); } } void StubbornSetsSimple::initialize_stubborn_set(const State &state) { // Add a necessary enabling set for an unsatisfied goal. FactPair unsatisfied_goal = find_unsatisfied_goal(state); assert(unsatisfied_goal != FactPair::no_fact); add_necessary_enabling_set(unsatisfied_goal); } void StubbornSetsSimple::handle_stubborn_operator(const State &state, int op_no) { FactPair unsatisfied_precondition = find_unsatisfied_precondition(op_no, state); if (unsatisfied_precondition == FactPair::no_fact) { /* no unsatisfied precondition found => operator is applicable => add all interfering operators */ add_interfering(op_no); } else { /* unsatisfied precondition found => add a necessary enabling set for it */ add_necessary_enabling_set(unsatisfied_precondition); } } static shared_ptr<PruningMethod> _parse(OptionParser &parser) { parser.document_synopsis( "Stubborn sets simple", "Stubborn sets represent a state pruning method which computes a subset " "of applicable operators in each state such that completeness and " "optimality of the overall search is preserved. As stubborn sets rely " "on several design choices, there are different variants thereof. " "The variant 'StubbornSetsSimple' resolves the design choices in a " "straight-forward way. 
For details, see the following papers: " + utils::format_conference_reference( {"Yusra Alkhazraji", "Martin Wehrle", "Robert Mattmueller", "Malte Helmert"}, "A Stubborn Set Algorithm for Optimal Planning", "https://ai.dmi.unibas.ch/papers/alkhazraji-et-al-ecai2012.pdf", "Proceedings of the 20th European Conference on Artificial Intelligence " "(ECAI 2012)", "891-892", "IOS Press", "2012") + utils::format_conference_reference( {"Martin Wehrle", "Malte Helmert"}, "Efficient Stubborn Sets: Generalized Algorithms and Selection Strategies", "http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7922/8042", "Proceedings of the 24th International Conference on Automated Planning " " and Scheduling (ICAPS 2014)", "323-331", "AAAI Press", "2014")); stubborn_sets::add_pruning_options(parser); Options opts = parser.parse(); if (parser.dry_run()) { return nullptr; } return make_shared<StubbornSetsSimple>(opts); } static Plugin<PruningMethod> _plugin("stubborn_sets_simple", _parse); }
4,504
C++
36.541666
89
0.650533
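All stubborn-set variants above share the same worklist closure: seed the set via initialize_stubborn_set(), then repeatedly pop a marked operator and let handle_stubborn_operator() mark more, until nothing new is added. A toy sketch of that fixpoint, with a made-up interference relation standing in for the necessary-enabling-set and interference rules:

#include <iostream>
#include <vector>

int main() {
    // Toy "marks these other operators" relation between 4 operators.
    std::vector<std::vector<int>> interferers = {{1}, {2}, {}, {0}};
    int num_operators = interferers.size();

    std::vector<bool> stubborn(num_operators, false);
    std::vector<int> queue;

    auto mark_as_stubborn = [&](int op) {
        if (!stubborn[op]) {      // each operator is enqueued at most once
            stubborn[op] = true;
            queue.push_back(op);
        }
    };

    mark_as_stubborn(0);  // seed, e.g. an achiever of an unsatisfied goal
    while (!queue.empty()) {
        int op = queue.back();
        queue.pop_back();
        for (int other : interferers[op])
            mark_as_stubborn(other);
    }

    for (int op = 0; op < num_operators; ++op)
        std::cout << "operator " << op
                  << (stubborn[op] ? ": in stubborn set" : ": not in stubborn set") << '\n';
    return 0;
}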
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/null_pruning_method.cc
#include "null_pruning_method.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/logging.h" using namespace std; namespace null_pruning_method { void NullPruningMethod::initialize(const shared_ptr<AbstractTask> &task) { PruningMethod::initialize(task); utils::g_log << "pruning method: none" << endl; } static shared_ptr<PruningMethod> _parse(OptionParser &parser) { parser.document_synopsis( "No pruning", "This is a skeleton method that does not perform any pruning, i.e., " "all applicable operators are applied in all expanded states. "); if (parser.dry_run()) { return nullptr; } return make_shared<NullPruningMethod>(); } static Plugin<PruningMethod> _plugin("null", _parse); }
772
C++
23.935483
77
0.678756
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/stubborn_sets.h
#ifndef PRUNING_STUBBORN_SETS_H
#define PRUNING_STUBBORN_SETS_H

#include "../abstract_task.h"
#include "../pruning_method.h"

#include "../utils/timer.h"

namespace options {
class OptionParser;
}

namespace stubborn_sets {
inline FactPair find_unsatisfied_condition(
    const std::vector<FactPair> &conditions, const State &state);

class StubbornSets : public PruningMethod {
    const double min_required_pruning_ratio;
    const int num_expansions_before_checking_pruning_ratio;
    int num_pruning_calls;
    bool is_pruning_disabled;
    utils::Timer timer;

    long num_unpruned_successors_generated;
    long num_pruned_successors_generated;

    /*
      stubborn_queue contains the operator indices of operators that
      have been marked as stubborn but have not yet been processed
      (i.e. more operators might need to be added to stubborn because
      of the operators in the queue).
    */
    std::vector<int> stubborn_queue;

    void compute_sorted_operators(const TaskProxy &task_proxy);
    void compute_achievers(const TaskProxy &task_proxy);
protected:
    /*
      We copy some parts of the task here, so we can avoid the more
      expensive access through the task interface during the search.
    */
    int num_operators;
    std::vector<std::vector<FactPair>> sorted_op_preconditions;
    std::vector<std::vector<FactPair>> sorted_op_effects;
    std::vector<FactPair> sorted_goals;

    /* achievers[var][value] contains all operator indices of operators
       that achieve the fact (var, value). */
    std::vector<std::vector<std::vector<int>>> achievers;

    /* stubborn[op_no] is true iff the operator with operator index
       op_no is contained in the stubborn set */
    std::vector<bool> stubborn;

    bool can_disable(int op1_no, int op2_no) const;
    bool can_conflict(int op1_no, int op2_no) const;

    /*
      Return the first unsatisfied goal pair, or FactPair::no_fact if
      there is none.

      Note that we use a sorted list of goals here intentionally.
      (See comment on find_unsatisfied_precondition.)
    */
    FactPair find_unsatisfied_goal(const State &state) const {
        return find_unsatisfied_condition(sorted_goals, state);
    }

    /*
      Return the first unsatisfied precondition, or FactPair::no_fact if
      there is none.

      Note that we use a sorted list of preconditions here intentionally.
      The ICAPS paper "Efficient Stubborn Sets: Generalized Algorithms and
      Selection Strategies" (Wehrle and Helmert, 2014) contains a limited
      study of this (see section "Strategies for Choosing Unsatisfied
      Conditions" and especially subsection "Static Variable Orderings").

      One of the outcomes was that the sorted version ("static orders/FD" in
      Table 1 of the paper) is dramatically better than choosing
      preconditions and goals randomly every time ("dynamic orders/random"
      in Table 1).

      The code also intentionally uses the "causal graph order" of variables
      rather than an arbitrary variable order. (However, so far, there is no
      experimental evidence that this is a particularly good order.)
    */
    FactPair find_unsatisfied_precondition(int op_no, const State &state) const {
        return find_unsatisfied_condition(sorted_op_preconditions[op_no], state);
    }

    // Return true iff the operator was enqueued.
    // TODO: rename to enqueue_stubborn_operator?
    bool mark_as_stubborn(int op_no);

    virtual void initialize_stubborn_set(const State &state) = 0;
    virtual void handle_stubborn_operator(const State &state, int op_no) = 0;
public:
    explicit StubbornSets(const options::Options &opts);
    virtual void initialize(const std::shared_ptr<AbstractTask> &task) override;

    /* TODO: move prune_operators, and also the statistics, to the
       base class to have only one method virtual, and to make the
       interface more obvious */
    virtual void prune_operators(const State &state,
                                 std::vector<OperatorID> &op_ids) override;
    virtual void print_statistics() const override;
};

// Return the first unsatisfied condition, or FactPair::no_fact if there is none.
inline FactPair find_unsatisfied_condition(
    const std::vector<FactPair> &conditions, const State &state) {
    for (const FactPair &condition : conditions) {
        if (state[condition.var].get_value() != condition.value)
            return condition;
    }
    return FactPair::no_fact;
}

void add_pruning_options(options::OptionParser &parser);
}

#endif
4,533
C
35.861788
81
0.704169
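The min_required_pruning_ratio and expansions_before_checking_pruning_ratio members declared above implement the automatic-disabling rule R = 1 - A/B described in add_pruning_options(). A tiny numeric example with made-up counters:

#include <iostream>

int main() {
    long num_unpruned_successors_generated = 1000;  // B: successors before pruning
    long num_pruned_successors_generated = 900;     // A: successors after pruning
    double min_required_pruning_ratio = 0.2;

    double pruning_ratio =
        1.0 - static_cast<double>(num_pruned_successors_generated) /
              static_cast<double>(num_unpruned_successors_generated);
    std::cout << "pruning ratio: " << pruning_ratio << '\n';  // 0.1
    if (pruning_ratio < min_required_pruning_ratio)
        std::cout << "-> pruning would be switched off" << '\n';
    return 0;
}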
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning/null_pruning_method.h
#ifndef PRUNING_NULL_PRUNING_METHOD_H #define PRUNING_NULL_PRUNING_METHOD_H #include "../pruning_method.h" namespace null_pruning_method { class NullPruningMethod : public PruningMethod { public: virtual void initialize(const std::shared_ptr<AbstractTask> &) override; virtual void prune_operators(const State &, std::vector<OperatorID> &) override {} virtual void print_statistics() const override {} }; } #endif
462
C
26.235293
76
0.692641
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/weighted_evaluator.h
#ifndef EVALUATORS_WEIGHTED_EVALUATOR_H #define EVALUATORS_WEIGHTED_EVALUATOR_H #include "../evaluator.h" #include <memory> namespace options { class Options; } namespace weighted_evaluator { class WeightedEvaluator : public Evaluator { std::shared_ptr<Evaluator> evaluator; int w; public: explicit WeightedEvaluator(const options::Options &opts); WeightedEvaluator(const std::shared_ptr<Evaluator> &eval, int weight); virtual ~WeightedEvaluator() override; virtual bool dead_ends_are_reliable() const override; virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; virtual void get_path_dependent_evaluators(std::set<Evaluator *> &evals) override; }; } #endif
738
C
23.633333
86
0.745257
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/pref_evaluator.h
#ifndef EVALUATORS_PREF_EVALUATOR_H #define EVALUATORS_PREF_EVALUATOR_H #include "../evaluator.h" #include <string> #include <vector> namespace pref_evaluator { class PrefEvaluator : public Evaluator { public: PrefEvaluator(); virtual ~PrefEvaluator() override; virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; virtual void get_path_dependent_evaluators(std::set<Evaluator *> &) override {} }; } #endif
467
C
20.272726
83
0.732334
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/const_evaluator.h
#ifndef EVALUATORS_CONST_EVALUATOR_H #define EVALUATORS_CONST_EVALUATOR_H #include "../evaluator.h" namespace options { class Options; } namespace const_evaluator { class ConstEvaluator : public Evaluator { int value; protected: virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; public: explicit ConstEvaluator(const options::Options &opts); virtual void get_path_dependent_evaluators( std::set<Evaluator *> &) override {} virtual ~ConstEvaluator() override = default; }; } #endif
557
C
19.666666
58
0.72711
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/max_evaluator.cc
#include "max_evaluator.h" #include "../option_parser.h" #include "../plugin.h" #include <cassert> using namespace std; namespace max_evaluator { MaxEvaluator::MaxEvaluator(const Options &opts) : CombiningEvaluator(opts.get_list<shared_ptr<Evaluator>>("evals")) { } MaxEvaluator::~MaxEvaluator() { } int MaxEvaluator::combine_values(const vector<int> &values) { int result = 0; for (int value : values) { assert(value >= 0); result = max(result, value); } return result; } static shared_ptr<Evaluator> _parse(OptionParser &parser) { parser.document_synopsis( "Max evaluator", "Calculates the maximum of the sub-evaluators."); parser.add_list_option<shared_ptr<Evaluator>>( "evals", "at least one evaluator"); Options opts = parser.parse(); opts.verify_list_non_empty<shared_ptr<Evaluator>>("evals"); if (parser.dry_run()) { return nullptr; } return make_shared<MaxEvaluator>(opts); } static Plugin<Evaluator> plugin("max", _parse, "evaluators_basic"); }
1,069
C++
21.765957
73
0.652011
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/combining_evaluator.cc
#include "combining_evaluator.h" #include "../evaluation_context.h" #include "../evaluation_result.h" using namespace std; namespace combining_evaluator { CombiningEvaluator::CombiningEvaluator( const vector<shared_ptr<Evaluator>> &subevaluators_) : subevaluators(subevaluators_) { all_dead_ends_are_reliable = true; for (const shared_ptr<Evaluator> &subevaluator : subevaluators) if (!subevaluator->dead_ends_are_reliable()) all_dead_ends_are_reliable = false; } CombiningEvaluator::~CombiningEvaluator() { } bool CombiningEvaluator::dead_ends_are_reliable() const { return all_dead_ends_are_reliable; } EvaluationResult CombiningEvaluator::compute_result( EvaluationContext &eval_context) { // This marks no preferred operators. EvaluationResult result; vector<int> values; values.reserve(subevaluators.size()); // Collect component values. Return infinity if any is infinite. for (const shared_ptr<Evaluator> &subevaluator : subevaluators) { int value = eval_context.get_evaluator_value_or_infinity(subevaluator.get()); if (value == EvaluationResult::INFTY) { result.set_evaluator_value(value); return result; } else { values.push_back(value); } } // If we arrived here, all subevaluator values are finite. result.set_evaluator_value(combine_values(values)); return result; } void CombiningEvaluator::get_path_dependent_evaluators( set<Evaluator *> &evals) { for (auto &subevaluator : subevaluators) subevaluator->get_path_dependent_evaluators(evals); } }
1,636
C++
29.314814
85
0.693154
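CombiningEvaluator::compute_result() above short-circuits as soon as one component reports infinity, so a single dead-end estimate dominates the whole combination. A simplified sketch using a local INFTY constant in place of EvaluationResult::INFTY and a max combination like the one in MaxEvaluator:

#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>

const int INFTY = std::numeric_limits<int>::max();

int combine_max(const std::vector<int> &component_values) {
    int result = 0;
    for (int value : component_values) {
        if (value == INFTY)
            return INFTY;            // one dead-end component dominates
        result = std::max(result, value);
    }
    return result;
}

int main() {
    std::cout << combine_max({3, 7, 2}) << '\n';              // 7
    std::cout << (combine_max({3, INFTY}) == INFTY) << '\n';  // 1 (dead end)
    return 0;
}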
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/max_evaluator.h
#ifndef EVALUATORS_MAX_EVALUATOR_H #define EVALUATORS_MAX_EVALUATOR_H #include "combining_evaluator.h" #include <vector> namespace options { class Options; } namespace max_evaluator { class MaxEvaluator : public combining_evaluator::CombiningEvaluator { protected: virtual int combine_values(const std::vector<int> &values) override; public: explicit MaxEvaluator(const options::Options &opts); virtual ~MaxEvaluator() override; }; } #endif
459
C
18.166666
72
0.760349
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/plugin_group.cc
#include "../plugin.h" namespace evaluators_plugin_group { static PluginGroupPlugin _plugin( "evaluators_basic", "Basic Evaluators"); }
145
C++
17.249998
35
0.710345
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/pref_evaluator.cc
#include "pref_evaluator.h" #include "../evaluation_context.h" #include "../evaluation_result.h" #include "../option_parser.h" #include "../plugin.h" using namespace std; namespace pref_evaluator { PrefEvaluator::PrefEvaluator() { } PrefEvaluator::~PrefEvaluator() { } EvaluationResult PrefEvaluator::compute_result( EvaluationContext &eval_context) { EvaluationResult result; if (eval_context.is_preferred()) result.set_evaluator_value(0); else result.set_evaluator_value(1); return result; } static shared_ptr<Evaluator> _parse(OptionParser &parser) { parser.document_synopsis("Preference evaluator", "Returns 0 if preferred is true and 1 otherwise."); parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<PrefEvaluator>(); } static Plugin<Evaluator> _plugin("pref", _parse, "evaluators_basic"); }
929
C++
22.846153
80
0.670614
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/g_evaluator.h
#ifndef EVALUATORS_G_EVALUATOR_H #define EVALUATORS_G_EVALUATOR_H #include "../evaluator.h" namespace g_evaluator { class GEvaluator : public Evaluator { public: GEvaluator() = default; virtual ~GEvaluator() override = default; virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; virtual void get_path_dependent_evaluators(std::set<Evaluator *> &) override {} }; } #endif
433
C
20.699999
83
0.720554
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/sum_evaluator.h
#ifndef EVALUATORS_SUM_EVALUATOR_H #define EVALUATORS_SUM_EVALUATOR_H #include "combining_evaluator.h" #include <memory> #include <vector> namespace options { class Options; } namespace sum_evaluator { class SumEvaluator : public combining_evaluator::CombiningEvaluator { protected: virtual int combine_values(const std::vector<int> &values) override; public: explicit SumEvaluator(const options::Options &opts); explicit SumEvaluator(const std::vector<std::shared_ptr<Evaluator>> &evals); virtual ~SumEvaluator() override; }; } #endif
557
C
21.319999
80
0.75763
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/combining_evaluator.h
#ifndef EVALUATORS_COMBINING_EVALUATOR_H #define EVALUATORS_COMBINING_EVALUATOR_H #include "../evaluator.h" #include <memory> #include <set> #include <string> #include <vector> namespace combining_evaluator { /* CombiningEvaluator is the base class for SumEvaluator and MaxEvaluator, which captures the common aspects of their behaviour. */ class CombiningEvaluator : public Evaluator { std::vector<std::shared_ptr<Evaluator>> subevaluators; bool all_dead_ends_are_reliable; protected: virtual int combine_values(const std::vector<int> &values) = 0; public: explicit CombiningEvaluator( const std::vector<std::shared_ptr<Evaluator>> &subevaluators_); virtual ~CombiningEvaluator() override; /* Note: dead_ends_are_reliable() is a state-independent method, so it only returns true if all subevaluators report dead ends reliably. Note that we could get more fine-grained information when considering of reliability for a given evaluated state. For example, if we use h1 (unreliable) and h2 (reliable) and have a state where h1 is finite and h2 is infinite, then we can *reliably* mark the state as a dead end. There is currently no way to exploit such state-based information, and hence we do not compute it. */ virtual bool dead_ends_are_reliable() const override; virtual EvaluationResult compute_result( EvaluationContext &eval_context) override; virtual void get_path_dependent_evaluators( std::set<Evaluator *> &evals) override; }; } #endif
1,574
C
31.142857
74
0.721728
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/const_evaluator.cc
#include "const_evaluator.h" #include "../option_parser.h" #include "../plugin.h" using namespace std; namespace const_evaluator { ConstEvaluator::ConstEvaluator(const Options &opts) : value(opts.get<int>("value")) { } EvaluationResult ConstEvaluator::compute_result(EvaluationContext &) { EvaluationResult result; result.set_evaluator_value(value); return result; } static shared_ptr<Evaluator> _parse(OptionParser &parser) { parser.document_synopsis( "Constant evaluator", "Returns a constant value."); parser.add_option<int>( "value", "the constant value", "1", Bounds("0", "infinity")); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<ConstEvaluator>(opts); } static Plugin<Evaluator> _plugin("const", _parse, "evaluators_basic"); }
889
C++
22.421052
70
0.655793
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/weighted_evaluator.cc
#include "weighted_evaluator.h" #include "../evaluation_context.h" #include "../evaluation_result.h" #include "../option_parser.h" #include "../plugin.h" #include <cstdlib> #include <sstream> using namespace std; namespace weighted_evaluator { WeightedEvaluator::WeightedEvaluator(const Options &opts) : evaluator(opts.get<shared_ptr<Evaluator>>("eval")), w(opts.get<int>("weight")) { } WeightedEvaluator::WeightedEvaluator(const shared_ptr<Evaluator> &eval, int weight) : evaluator(eval), w(weight) { } WeightedEvaluator::~WeightedEvaluator() { } bool WeightedEvaluator::dead_ends_are_reliable() const { return evaluator->dead_ends_are_reliable(); } EvaluationResult WeightedEvaluator::compute_result( EvaluationContext &eval_context) { // Note that this produces no preferred operators. EvaluationResult result; int value = eval_context.get_evaluator_value_or_infinity(evaluator.get()); if (value != EvaluationResult::INFTY) { // TODO: Check for overflow? value *= w; } result.set_evaluator_value(value); return result; } void WeightedEvaluator::get_path_dependent_evaluators(set<Evaluator *> &evals) { evaluator->get_path_dependent_evaluators(evals); } static shared_ptr<Evaluator> _parse(OptionParser &parser) { parser.document_synopsis( "Weighted evaluator", "Multiplies the value of the evaluator with the given weight."); parser.add_option<shared_ptr<Evaluator>>("eval", "evaluator"); parser.add_option<int>("weight", "weight"); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<WeightedEvaluator>(opts); } static Plugin<Evaluator> _plugin("weight", _parse, "evaluators_basic"); }
1,768
C++
27.532258
83
0.697964
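The TODO in WeightedEvaluator::compute_result() above asks whether the multiplication should be checked for overflow; as written, the repository multiplies unguarded. A sketch of what such a guard could look like, with a local INFTY constant standing in for EvaluationResult::INFTY (this is not the planner's current behaviour):

#include <iostream>
#include <limits>

const int INFTY = std::numeric_limits<int>::max();

int weighted_value(int value, int weight) {
    if (value == INFTY)
        return INFTY;                            // dead ends stay dead ends
    if (value > 0 && weight > 0 && value > INFTY / weight)
        return INFTY;                            // saturate instead of overflowing
    return value * weight;
}

int main() {
    std::cout << weighted_value(7, 5) << '\n';                     // 35
    std::cout << (weighted_value(INFTY, 5) == INFTY) << '\n';      // 1
    std::cout << (weighted_value(INFTY / 2, 3) == INFTY) << '\n';  // 1 (saturated)
    return 0;
}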
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/g_evaluator.cc
#include "g_evaluator.h" #include "../evaluation_context.h" #include "../evaluation_result.h" #include "../option_parser.h" #include "../plugin.h" using namespace std; namespace g_evaluator { EvaluationResult GEvaluator::compute_result(EvaluationContext &eval_context) { EvaluationResult result; result.set_evaluator_value(eval_context.get_g_value()); return result; } static shared_ptr<Evaluator> _parse(OptionParser &parser) { parser.document_synopsis( "g-value evaluator", "Returns the g-value (path cost) of the search node."); parser.parse(); if (parser.dry_run()) return nullptr; else return make_shared<GEvaluator>(); } static Plugin<Evaluator> _plugin("g", _parse, "evaluators_basic"); }
761
C++
24.399999
78
0.683311
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluators/sum_evaluator.cc
#include "sum_evaluator.h" #include "../option_parser.h" #include "../plugin.h" #include <cassert> #include <limits> using namespace std; namespace sum_evaluator { SumEvaluator::SumEvaluator(const Options &opts) : CombiningEvaluator(opts.get_list<shared_ptr<Evaluator>>("evals")) { } SumEvaluator::SumEvaluator(const vector<shared_ptr<Evaluator>> &evals) : CombiningEvaluator(evals) { } SumEvaluator::~SumEvaluator() { } int SumEvaluator::combine_values(const vector<int> &values) { int result = 0; for (int value : values) { assert(value >= 0); result += value; assert(result >= 0); // Check against overflow. } return result; } static shared_ptr<Evaluator> _parse(OptionParser &parser) { parser.document_synopsis("Sum evaluator", "Calculates the sum of the sub-evaluators."); parser.add_list_option<shared_ptr<Evaluator>>("evals", "at least one evaluator"); Options opts = parser.parse(); opts.verify_list_non_empty<shared_ptr<Evaluator>>("evals"); if (parser.dry_run()) return nullptr; else return make_shared<SumEvaluator>(opts); } static Plugin<Evaluator> _plugin("sum", _parse, "evaluators_basic"); }
1,236
C++
23.74
85
0.659385
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/lp/lp_solver.cc
#include "lp_solver.h" #include "lp_internals.h" #include "../option_parser.h" #include "../utils/logging.h" #include "../utils/system.h" #ifdef USE_LP #ifdef __GNUG__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #endif #include <OsiSolverInterface.hpp> #include <CoinPackedMatrix.hpp> #include <CoinPackedVector.hpp> #ifdef __GNUG__ #pragma GCC diagnostic pop #endif #endif #include <cassert> #include <iostream> #include <numeric> using namespace std; using utils::ExitCode; namespace lp { void add_lp_solver_option_to_parser(OptionParser &parser) { parser.document_note( "Note", "to use an LP solver, you must build the planner with LP support. " "See LPBuildInstructions."); vector<string> lp_solvers; vector<string> lp_solvers_doc; lp_solvers.push_back("CLP"); lp_solvers_doc.push_back("default LP solver shipped with the COIN library"); lp_solvers.push_back("CPLEX"); lp_solvers_doc.push_back("commercial solver by IBM"); lp_solvers.push_back("GUROBI"); lp_solvers_doc.push_back("commercial solver"); lp_solvers.push_back("SOPLEX"); lp_solvers_doc.push_back("open source solver by ZIB"); parser.add_enum_option<LPSolverType>( "lpsolver", lp_solvers, "external solver that should be used to solve linear programs", "CPLEX", lp_solvers_doc); } LPConstraint::LPConstraint(double lower_bound, double upper_bound) : lower_bound(lower_bound), upper_bound(upper_bound) { } void LPConstraint::clear() { variables.clear(); coefficients.clear(); } bool LPConstraint::empty() const { return variables.empty(); } void LPConstraint::insert(int index, double coefficient) { variables.push_back(index); coefficients.push_back(coefficient); } ostream &LPConstraint::dump(ostream &stream, double infinity, const LinearProgram *program) { if (lower_bound != -infinity) { stream << lower_bound << " <= "; } for (size_t i = 0; i < variables.size(); ++i) { if (i != 0) stream << " + "; int variable = variables[i]; string variable_name; if (program && program->get_variables().has_names() && !program->get_variables().get_name(variable).empty()) { variable_name = program->get_variables().get_name(variable); } else { variable_name = "v" + to_string(variable); } stream << coefficients[i] << " * " << variable_name; } if (upper_bound != infinity) { stream << " <= " << upper_bound; } else if (lower_bound == -infinity) { stream << " <= infinity"; } return stream; } LPVariable::LPVariable(double lower_bound, double upper_bound, double objective_coefficient, bool is_integer) : lower_bound(lower_bound), upper_bound(upper_bound), objective_coefficient(objective_coefficient), is_integer(is_integer) { } named_vector::NamedVector<LPVariable> &LinearProgram::get_variables() { return variables; } named_vector::NamedVector<LPConstraint> &LinearProgram::get_constraints() { return constraints; } LPObjectiveSense LinearProgram::get_sense() const { return sense; } const named_vector::NamedVector<LPVariable> &LinearProgram::get_variables() const { return variables; } const named_vector::NamedVector<LPConstraint> &LinearProgram::get_constraints() const { return constraints; } const string &LinearProgram::get_objective_name() const { return objective_name; } void LinearProgram::set_objective_name(string name) { objective_name = name; } LPSolver::~LPSolver() { } #ifdef USE_LP LPSolver::LPSolver(LPSolverType solver_type) : is_initialized(false), is_mip(false), is_solved(false), num_permanent_constraints(0), has_temporary_constraints_(false) { try { lp_solver = create_lp_solver(solver_type); } catch (CoinError &error) { 
handle_coin_error(error); } } void LPSolver::clear_temporary_data() { elements.clear(); indices.clear(); starts.clear(); col_lb.clear(); col_ub.clear(); objective.clear(); row_lb.clear(); row_ub.clear(); rows.clear(); } void LPSolver::load_problem(const LinearProgram &lp) { clear_temporary_data(); is_mip = false; is_initialized = false; num_permanent_constraints = lp.get_constraints().size(); for (const LPVariable &var : lp.get_variables()) { col_lb.push_back(var.lower_bound); col_ub.push_back(var.upper_bound); objective.push_back(var.objective_coefficient); } for (const LPConstraint &constraint : lp.get_constraints()) { row_lb.push_back(constraint.get_lower_bound()); row_ub.push_back(constraint.get_upper_bound()); } for (const LPConstraint &constraint : lp.get_constraints()) { const vector<int> &vars = constraint.get_variables(); const vector<double> &coeffs = constraint.get_coefficients(); assert(vars.size() == coeffs.size()); starts.push_back(elements.size()); indices.insert(indices.end(), vars.begin(), vars.end()); elements.insert(elements.end(), coeffs.begin(), coeffs.end()); } /* There are two ways to pass the lengths of vectors to a CoinMatrix: 1) 'starts' contains one entry per vector and we pass a separate array of vector 'lengths' to the constructor. 2) If there are no gaps in the elements, we can also add elements.size() as a last entry in the vector 'starts' and leave the parameter for 'lengths' at its default (0). OSI recreates the 'lengths' array in any case and uses optimized code for the second case, so we use it here. */ starts.push_back(elements.size()); try { CoinPackedMatrix matrix(false, lp.get_variables().size(), lp.get_constraints().size(), elements.size(), elements.data(), indices.data(), starts.data(), 0); lp_solver->loadProblem(matrix, col_lb.data(), col_ub.data(), objective.data(), row_lb.data(), row_ub.data()); for (int i = 0; i < static_cast<int>(lp.get_variables().size()); ++i) { if (lp.get_variables()[i].is_integer) { lp_solver->setInteger(i); is_mip = true; } } /* We set the objective sense after loading because the SoPlex interfaces of all OSI versions <= 0.108.4 ignore it when it is set earlier. See issue752 for details. */ if (lp.get_sense() == LPObjectiveSense::MINIMIZE) { lp_solver->setObjSense(1); } else { lp_solver->setObjSense(-1); } if (!lp.get_objective_name().empty()) { lp_solver->setObjName(lp.get_objective_name()); } else if (lp.get_variables().has_names() || lp.get_constraints().has_names()) { // OSI requires the objective name to be set whenever any variable or constraint names are set. 
lp_solver->setObjName("obj"); } if (lp.get_variables().has_names() || lp.get_constraints().has_names() || !lp.get_objective_name().empty()) { lp_solver->setIntParam(OsiIntParam::OsiNameDiscipline, 2); } else { lp_solver->setIntParam(OsiIntParam::OsiNameDiscipline, 0); } if (lp.get_variables().has_names()) { for (int i = 0; i < lp.get_variables().size(); ++i) { lp_solver->setColName(i, lp.get_variables().get_name(i)); } } if (lp.get_constraints().has_names()) { for (int i = 0; i < lp.get_constraints().size(); ++i) { lp_solver->setRowName(i, lp.get_constraints().get_name(i)); } } } catch (CoinError &error) { handle_coin_error(error); } clear_temporary_data(); } void LPSolver::add_temporary_constraints(const vector<LPConstraint> &constraints) { if (!constraints.empty()) { clear_temporary_data(); int num_rows = constraints.size(); for (const LPConstraint &constraint : constraints) { row_lb.push_back(constraint.get_lower_bound()); row_ub.push_back(constraint.get_upper_bound()); rows.push_back(new CoinShallowPackedVector( constraint.get_variables().size(), constraint.get_variables().data(), constraint.get_coefficients().data(), false)); } try { lp_solver->addRows(num_rows, rows.data(), row_lb.data(), row_ub.data()); } catch (CoinError &error) { handle_coin_error(error); } for (CoinPackedVectorBase *row : rows) { delete row; } clear_temporary_data(); has_temporary_constraints_ = true; is_solved = false; } } void LPSolver::clear_temporary_constraints() { if (has_temporary_constraints_) { try { lp_solver->restoreBaseModel(num_permanent_constraints); } catch (CoinError &error) { handle_coin_error(error); } has_temporary_constraints_ = false; is_solved = false; } } double LPSolver::get_infinity() const { try { return lp_solver->getInfinity(); } catch (CoinError &error) { handle_coin_error(error); } } void LPSolver::set_objective_coefficients(const vector<double> &coefficients) { assert(static_cast<int>(coefficients.size()) == get_num_variables()); vector<int> indices(coefficients.size()); iota(indices.begin(), indices.end(), 0); try { lp_solver->setObjCoeffSet(indices.data(), indices.data() + indices.size(), coefficients.data()); } catch (CoinError &error) { handle_coin_error(error); } is_solved = false; } void LPSolver::set_objective_coefficient(int index, double coefficient) { assert(index < get_num_variables()); try { lp_solver->setObjCoeff(index, coefficient); } catch (CoinError &error) { handle_coin_error(error); } is_solved = false; } void LPSolver::set_constraint_lower_bound(int index, double bound) { assert(index < get_num_constraints()); try { lp_solver->setRowLower(index, bound); } catch (CoinError &error) { handle_coin_error(error); } is_solved = false; } void LPSolver::set_constraint_upper_bound(int index, double bound) { assert(index < get_num_constraints()); try { lp_solver->setRowUpper(index, bound); } catch (CoinError &error) { handle_coin_error(error); } is_solved = false; } void LPSolver::set_variable_lower_bound(int index, double bound) { assert(index < get_num_variables()); try { lp_solver->setColLower(index, bound); } catch (CoinError &error) { handle_coin_error(error); } is_solved = false; } void LPSolver::set_variable_upper_bound(int index, double bound) { assert(index < get_num_variables()); try { lp_solver->setColUpper(index, bound); } catch (CoinError &error) { handle_coin_error(error); } is_solved = false; } void LPSolver::solve() { try { if (is_initialized) { lp_solver->resolve(); } else { lp_solver->initialSolve(); is_initialized = true; } if (is_mip) { 
lp_solver->branchAndBound(); } if (lp_solver->isAbandoned()) { // The documentation of OSI is not very clear here but memory seems // to be the most common cause for this in our case. cerr << "Abandoned LP during resolve. " << "Reasons include \"numerical difficulties\" and running out of memory." << endl; utils::exit_with(ExitCode::SEARCH_CRITICAL_ERROR); } is_solved = true; } catch (CoinError &error) { handle_coin_error(error); } } void LPSolver::write_lp(const string &filename) const { try { lp_solver->writeLp(filename.c_str()); } catch (CoinError &error) { handle_coin_error(error); } } void LPSolver::print_failure_analysis() const { cout << "abandoned: " << lp_solver->isAbandoned() << endl; cout << "proven optimal: " << lp_solver->isProvenOptimal() << endl; cout << "proven primal infeasible: " << lp_solver->isProvenPrimalInfeasible() << endl; cout << "proven dual infeasible: " << lp_solver->isProvenDualInfeasible() << endl; cout << "dual objective limit reached: " << lp_solver->isDualObjectiveLimitReached() << endl; cout << "iteration limit reached: " << lp_solver->isIterationLimitReached() << endl; } bool LPSolver::has_optimal_solution() const { assert(is_solved); try { return !lp_solver->isProvenPrimalInfeasible() && !lp_solver->isProvenDualInfeasible() && lp_solver->isProvenOptimal(); } catch (CoinError &error) { handle_coin_error(error); } } double LPSolver::get_objective_value() const { assert(has_optimal_solution()); try { return lp_solver->getObjValue(); } catch (CoinError &error) { handle_coin_error(error); } } bool LPSolver::is_infeasible() const { assert(is_solved); try { return lp_solver->isProvenPrimalInfeasible() && !lp_solver->isProvenDualInfeasible() && !lp_solver->isProvenOptimal(); } catch (CoinError &error) { handle_coin_error(error); } } bool LPSolver::is_unbounded() const { assert(is_solved); try { return !lp_solver->isProvenPrimalInfeasible() && lp_solver->isProvenDualInfeasible() && !lp_solver->isProvenOptimal(); } catch (CoinError &error) { handle_coin_error(error); } } vector<double> LPSolver::extract_solution() const { assert(has_optimal_solution()); try { const double *sol = lp_solver->getColSolution(); return vector<double>(sol, sol + get_num_variables()); } catch (CoinError &error) { handle_coin_error(error); } } int LPSolver::get_num_variables() const { try { return lp_solver->getNumCols(); } catch (CoinError &error) { handle_coin_error(error); } } int LPSolver::get_num_constraints() const { try { return lp_solver->getNumRows(); } catch (CoinError &error) { handle_coin_error(error); } } int LPSolver::has_temporary_constraints() const { return has_temporary_constraints_; } void LPSolver::print_statistics() const { utils::g_log << "LP variables: " << get_num_variables() << endl; utils::g_log << "LP constraints: " << get_num_constraints() << endl; } #endif }
15,312
C++
29.810865
118
0.58686
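The load_problem code above flattens all constraints into the three parallel arrays elements, indices and starts before constructing the CoinPackedMatrix, using a trailing entry in starts instead of a separate lengths array. The standalone sketch below (plain C++ with a simplified stand-in for LPConstraint, no OSI dependency) shows how two small constraints end up in that row-ordered representation.

#include <cassert>
#include <iostream>
#include <vector>

struct SimpleConstraint {               // simplified stand-in for lp::LPConstraint
    std::vector<int> variables;         // column indices with non-zero coefficients
    std::vector<double> coefficients;   // matching coefficients
};

int main() {
    // Two constraints over variables x0, x1, x2:
    //   c0:  2*x0 + 3*x2
    //   c1:  1*x1
    std::vector<SimpleConstraint> constraints = {
        {{0, 2}, {2.0, 3.0}},
        {{1}, {1.0}},
    };

    std::vector<double> elements;  // all non-zero coefficients, row by row
    std::vector<int> indices;      // column index of each coefficient
    std::vector<int> starts;       // offset of each row's first coefficient

    for (const SimpleConstraint &c : constraints) {
        assert(c.variables.size() == c.coefficients.size());
        starts.push_back(elements.size());
        indices.insert(indices.end(), c.variables.begin(), c.variables.end());
        elements.insert(elements.end(), c.coefficients.begin(), c.coefficients.end());
    }
    // Trailing entry: lets the matrix constructor derive the row lengths itself.
    starts.push_back(elements.size());

    // elements = {2, 3, 1}, indices = {0, 2, 1}, starts = {0, 2, 3}
    for (size_t row = 0; row + 1 < starts.size(); ++row) {
        std::cout << "row " << row << ":";
        for (int k = starts[row]; k < starts[row + 1]; ++k)
            std::cout << " " << elements[k] << "*x" << indices[k];
        std::cout << std::endl;
    }
    return 0;
}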
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/lp/lp_internals.h
#ifndef LP_LP_INTERNALS_H #define LP_LP_INTERNALS_H /* This file provides some internal functions for the LP solver code. They could be implemented in linear_program.cc but we moved them here to avoid a long and complex file. They should not be necessary outside of linear_program.cc. If you need them, think about extending the LP class instead. */ #include "../utils/language.h" #include <memory> class CoinError; class OsiSolverInterface; namespace lp { enum class LPSolverType; std::unique_ptr<OsiSolverInterface> create_lp_solver(LPSolverType solver_type); /* Print the CoinError and then exit with ExitCode::SEARCH_CRITICAL_ERROR. Note that out-of-memory conditions occurring within CPLEX code cannot be caught by a try/catch block. When CPLEX runs out of memory, the planner will attempt to terminate gracefully, like it does with uncaught out-of-memory exceptions in other parts of the code. */ NO_RETURN void handle_coin_error(const CoinError &error); } #endif
998
C
26.749999
79
0.768537
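Every OSI call in the solver wrapper is guarded by the same try/catch idiom so that any CoinError is funnelled through handle_coin_error, which never returns. The standalone sketch below imitates that idiom, with std::runtime_error standing in for CoinError and std::exit standing in for utils::exit_with; it illustrates the control flow only and is not the planner's actual error handler.

#include <cstdlib>
#include <iostream>
#include <stdexcept>

// Stand-in for CoinError; the real handler receives the COIN exception type.
using FakeCoinError = std::runtime_error;

[[noreturn]] void handle_coin_error(const FakeCoinError &error) {
    std::cerr << "Coin threw exception: " << error.what() << std::endl;
    std::exit(1);  // the planner calls utils::exit_with(SEARCH_CRITICAL_ERROR) here
}

int risky_solver_call(bool fail) {
    if (fail)
        throw FakeCoinError("simulated solver failure");
    return 42;
}

int query_solver(bool fail) {
    try {
        return risky_solver_call(fail);
    } catch (FakeCoinError &error) {
        handle_coin_error(error);  // never returns, so no value is needed here
    }
}

int main() {
    std::cout << query_solver(false) << std::endl;
    return 0;
}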
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/lp/lp_solver.h
#ifndef LP_LP_SOLVER_H #define LP_LP_SOLVER_H #include "../algorithms/named_vector.h" #include "../utils/language.h" #include "../utils/system.h" #include <functional> #include <memory> #include <vector> /* All methods that use COIN specific classes only do something useful if the planner is compiled with USE_LP. Otherwise, they just print an error message and abort. */ #ifdef USE_LP #define LP_METHOD(X) X; #else #define LP_METHOD(X) NO_RETURN X { \ ABORT("LP method called but the planner was compiled without LP support.\n" \ "See http://www.fast-downward.org/LPBuildInstructions\n" \ "to install an LP solver and use it in the planner."); \ } #endif class CoinPackedVectorBase; class OsiSolverInterface; namespace options { class OptionParser; } namespace lp { enum class LPSolverType { CLP, CPLEX, GUROBI, SOPLEX }; enum class LPObjectiveSense { MAXIMIZE, MINIMIZE }; void add_lp_solver_option_to_parser(options::OptionParser &parser); class LinearProgram; class LPConstraint { std::vector<int> variables; std::vector<double> coefficients; double lower_bound; double upper_bound; public: LPConstraint(double lower_bound, double upper_bound); const std::vector<int> &get_variables() const {return variables;} const std::vector<double> &get_coefficients() const {return coefficients;} double get_lower_bound() const {return lower_bound;} void set_lower_bound(double lb) {lower_bound = lb;} double get_upper_bound() const {return upper_bound;} void set_upper_bound(double ub) {upper_bound = ub;} void clear(); bool empty() const; // Coefficients must be added without duplicate indices. void insert(int index, double coefficient); std::ostream &dump(std::ostream &stream, double infinity, const LinearProgram *program = nullptr); }; struct LPVariable { double lower_bound; double upper_bound; double objective_coefficient; bool is_integer; LPVariable(double lower_bound, double upper_bound, double objective_coefficient, bool is_integer = false); }; class LinearProgram { LPObjectiveSense sense; std::string objective_name; named_vector::NamedVector<LPVariable> variables; named_vector::NamedVector<LPConstraint> constraints; public: // objective_name is the name of the objective function used when writing the lp to a file. LinearProgram(LPObjectiveSense sense, named_vector::NamedVector<LPVariable> &&variables, named_vector::NamedVector<LPConstraint> &&constraints) : sense(sense), variables(std::move(variables)), constraints(std::move(constraints)) { } /* Variables and constraints can be given a custom name for debugging purposes. This has an impact on performance and should not be used in production code. */ named_vector::NamedVector<LPVariable> &get_variables(); named_vector::NamedVector<LPConstraint> &get_constraints(); const named_vector::NamedVector<LPVariable> &get_variables() const; const named_vector::NamedVector<LPConstraint> &get_constraints() const; LPObjectiveSense get_sense() const; void set_objective_name(std::string name); const std::string &get_objective_name() const; }; #ifdef __GNUG__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #endif class LPSolver { bool is_initialized; bool is_mip; bool is_solved; int num_permanent_constraints; bool has_temporary_constraints_; #ifdef USE_LP std::unique_ptr<OsiSolverInterface> lp_solver; #endif /* Temporary data for assigning a new problem. We keep the vectors around to avoid recreating them in every assignment. 
*/ std::vector<double> elements; std::vector<int> indices; std::vector<int> starts; std::vector<double> col_lb; std::vector<double> col_ub; std::vector<double> objective; std::vector<double> row_lb; std::vector<double> row_ub; std::vector<CoinPackedVectorBase *> rows; void clear_temporary_data(); public: LP_METHOD(explicit LPSolver(LPSolverType solver_type)) /* Note that the destructor does not use LP_METHOD because it should not have the attribute NO_RETURN. It also cannot be set to the default destructor here (~LPSolver() = default;) because OsiSolverInterface is a forward declaration and the incomplete type cannot be destroyed. */ ~LPSolver(); LP_METHOD(void load_problem(const LinearProgram &lp)) LP_METHOD(void add_temporary_constraints(const std::vector<LPConstraint> &constraints)) LP_METHOD(void clear_temporary_constraints()) LP_METHOD(double get_infinity() const) LP_METHOD(void set_objective_coefficients(const std::vector<double> &coefficients)) LP_METHOD(void set_objective_coefficient(int index, double coefficient)) LP_METHOD(void set_constraint_lower_bound(int index, double bound)) LP_METHOD(void set_constraint_upper_bound(int index, double bound)) LP_METHOD(void set_variable_lower_bound(int index, double bound)) LP_METHOD(void set_variable_upper_bound(int index, double bound)) LP_METHOD(void solve()) LP_METHOD(void write_lp(const std::string &filename) const) LP_METHOD(void print_failure_analysis() const) LP_METHOD(bool is_infeasible() const) LP_METHOD(bool is_unbounded() const) /* Return true if the solving the LP showed that it is bounded feasible and the discovered solution is guaranteed to be optimal. We test for optimality explicitly because solving the LP sometimes finds suboptimal solutions due to numerical difficulties. The LP has to be solved with a call to solve() before calling this method. */ LP_METHOD(bool has_optimal_solution() const) /* Return the objective value found after solving an LP. The LP has to be solved with a call to solve() and has to have an optimal solution before calling this method. */ LP_METHOD(double get_objective_value() const) /* Return the solution found after solving an LP as a vector with one entry per variable. The LP has to be solved with a call to solve() and has to have an optimal solution before calling this method. */ LP_METHOD(std::vector<double> extract_solution() const) LP_METHOD(int get_num_variables() const) LP_METHOD(int get_num_constraints() const) LP_METHOD(int has_temporary_constraints() const) LP_METHOD(void print_statistics() const) }; #ifdef __GNUG__ #pragma GCC diagnostic pop #endif } #endif
6,672
C
32.034653
102
0.701139
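Putting the declarations in this header together, a caller creates LPVariable and LPConstraint objects, wraps them in a LinearProgram and hands that to LPSolver::load_problem before calling solve. The sketch below mirrors that flow for a tiny made-up LP; it assumes a planner build with USE_LP and an installed CLP solver, so it is illustrative rather than a drop-in snippet.

// Sketch only: assumes the planner is compiled with USE_LP and CLP is available.
#include "lp_solver.h"

#include <utility>

using namespace lp;

double solve_toy_lp() {
    LPSolver solver(LPSolverType::CLP);
    double inf = solver.get_infinity();

    // minimize x0 + 2*x1  subject to  x0 + x1 >= 3,  x0, x1 >= 0
    named_vector::NamedVector<LPVariable> variables;
    variables.push_back(LPVariable(0.0, inf, 1.0));
    variables.push_back(LPVariable(0.0, inf, 2.0));

    named_vector::NamedVector<LPConstraint> constraints;
    LPConstraint c(3.0, inf);
    c.insert(0, 1.0);
    c.insert(1, 1.0);
    constraints.push_back(std::move(c));

    solver.load_problem(LinearProgram(
        LPObjectiveSense::MINIMIZE, std::move(variables), std::move(constraints)));
    solver.solve();
    return solver.has_optimal_solution() ? solver.get_objective_value() : -1.0;
}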
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/lp/lp_internals.cc
#include "lp_internals.h" #ifdef USE_LP #include "lp_solver.h" #include "../utils/system.h" #ifdef __GNUG__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated" #pragma GCC diagnostic ignored "-Woverflow" #pragma GCC diagnostic ignored "-Wsign-compare" #pragma GCC diagnostic ignored "-Wunused-parameter" #if __GNUC__ >= 6 #pragma GCC diagnostic ignored "-Wmisleading-indentation" #endif #endif #ifdef __clang__ #pragma GCC diagnostic ignored "-Wconstant-conversion" #endif #include <OsiSolverInterface.hpp> #ifdef COIN_HAS_CLP #include <OsiClpSolverInterface.hpp> #endif #ifdef COIN_HAS_CPX #include <OsiCpxSolverInterface.hpp> #include <cplex.h> #endif #ifdef COIN_HAS_GRB #include <OsiGrbSolverInterface.hpp> #endif #ifdef COIN_HAS_SPX #include <OsiSpxSolverInterface.hpp> #include <spxout.h> #endif #ifdef __GNUG__ #pragma GCC diagnostic pop #endif using namespace std; using utils::ExitCode; namespace lp { // CPLEX warning that is misleadingly reported with the severity of a critical error. static const string CPLEX_WARNING_COMPRESS = "CPX0000 Compressing row and column files."; // CPLEX warning from writeMps if no column names are defined. static const string CPLEX_WARNING_WRITE_MPS_COLUMNS = "CPX0000 Default column names x1, x2 ... being created."; static const string CPLEX_WARNING_WRITE_MPS_ROWS = "CPX0000 Default row names c1, c2 ... being created."; static const string CPLEX_ERROR_OOM = "CPX0000 CPLEX Error 1001: Out of memory."; static const string CPLEX_ERROR_OOM_PRE = "CPX0000 Insufficient memory for presolve."; static const string CPLEX_ERROR_OOM_DEVEX = "CPX0000 Not enough memory for devex."; static const string COIN_CPLEX_ERROR_OOM = "returned error 1001"; /* CPLEX sometimes does not report errors as exceptions and only prints an error message. This class will report any error messages as usual but will exit with a critical error afterwards. 
*/ class ErrorCatchingCoinMessageHandler : public CoinMessageHandler { public: ErrorCatchingCoinMessageHandler() { setLogLevel(0); } ~ErrorCatchingCoinMessageHandler() { } virtual void checkSeverity() { /* Note that currentMessage_ should be used here but it doesn't help for clpex: currentMessage_.severity() is always "I" currentMessage_.externalNumber() is always 0 currentMessage_.detail() is always empty currentMessage_.message() also is empty (NFI) */ if (messageBuffer_ == CPLEX_WARNING_COMPRESS || messageBuffer_ == CPLEX_WARNING_WRITE_MPS_COLUMNS || messageBuffer_ == CPLEX_WARNING_WRITE_MPS_ROWS) { CoinMessageHandler::checkSeverity(); } else if (messageBuffer_ == CPLEX_ERROR_OOM || messageBuffer_ == CPLEX_ERROR_OOM_PRE || messageBuffer_ == CPLEX_ERROR_OOM_DEVEX) { utils::exit_with(ExitCode::SEARCH_OUT_OF_MEMORY); } else { utils::exit_with(ExitCode::SEARCH_CRITICAL_ERROR); } } }; unique_ptr<OsiSolverInterface> create_lp_solver(LPSolverType solver_type) { string missing_symbol; OsiSolverInterface *lp_solver = 0; switch (solver_type) { case LPSolverType::CLP: #ifdef COIN_HAS_CLP lp_solver = new OsiClpSolverInterface; #else missing_symbol = "COIN_HAS_CLP"; #endif break; case LPSolverType::CPLEX: #ifdef COIN_HAS_CPX { OsiCpxSolverInterface *cpx_solver = new OsiCpxSolverInterface; CPXsetintparam(cpx_solver->getEnvironmentPtr(), CPX_PARAM_THREADS, 1); cpx_solver->passInMessageHandler(new ErrorCatchingCoinMessageHandler); lp_solver = cpx_solver; } #else missing_symbol = "COIN_HAS_CPX"; #endif break; case LPSolverType::GUROBI: #ifdef COIN_HAS_GRB lp_solver = new OsiGrbSolverInterface; #else missing_symbol = "COIN_HAS_GRB"; #endif break; case LPSolverType::SOPLEX: #ifdef COIN_HAS_SPX { OsiSpxSolverInterface *spx_solver = new OsiSpxSolverInterface; spx_solver->getSPxOut()->setVerbosity(soplex::SPxOut::ERROR); lp_solver = spx_solver; } #else missing_symbol = "COIN_HAS_SPX"; #endif break; default: ABORT("Unknown LP solver type."); } if (lp_solver) { lp_solver->messageHandler()->setLogLevel(0); return unique_ptr<OsiSolverInterface>(lp_solver); } else { cerr << "You must build the planner with the " << missing_symbol << " symbol defined" << endl; utils::exit_with(ExitCode::SEARCH_CRITICAL_ERROR); } } NO_RETURN void handle_coin_error(const CoinError &error) { if (error.message().find(COIN_CPLEX_ERROR_OOM) != string::npos) { cout << "CPLEX ran out of memory during OSI method." << endl << "Coin exception: " << error.message() << endl << " from method " << error.methodName() << endl << " from class " << error.className() << endl; utils::exit_with(ExitCode::SEARCH_OUT_OF_MEMORY); } else { cerr << "Coin threw exception: " << error.message() << endl << " from method " << error.methodName() << endl << " from class " << error.className() << endl; utils::exit_with(ExitCode::SEARCH_CRITICAL_ERROR); } } } #endif
5,400
C++
31.536144
112
0.658704
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_collection_generator_single_cegar.cc
#include "pattern_collection_generator_single_cegar.h" #include "cegar.h" #include "utils.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/markup.h" #include "../utils/rng_options.h" using namespace std; namespace pdbs { PatternCollectionGeneratorSingleCegar::PatternCollectionGeneratorSingleCegar( const options::Options &opts) : max_pdb_size(opts.get<int>("max_pdb_size")), max_collection_size(opts.get<int>("max_collection_size")), use_wildcard_plans(opts.get<bool>("use_wildcard_plans")), max_time(opts.get<double>("max_time")), verbosity(opts.get<utils::Verbosity>("verbosity")), rng(utils::parse_rng_from_options(opts)) { } PatternCollectionInformation PatternCollectionGeneratorSingleCegar::generate( const shared_ptr<AbstractTask> &task) { // Store the set of goals in random order. TaskProxy task_proxy(*task); vector<FactPair> goals = get_goals_in_random_order(task_proxy, *rng); CEGAR cegar( max_pdb_size, max_collection_size, use_wildcard_plans, max_time, verbosity, rng, task, move(goals)); return cegar.compute_pattern_collection(); } static shared_ptr<PatternCollectionGenerator> _parse( options::OptionParser &parser) { parser.document_synopsis( "Single CEGAR", "This pattern collection generator implements the single CEGAR algorithm " "described in the paper" + utils::format_conference_reference( {"Alexander Rovner", "Silvan Sievers", "Malte Helmert"}, "Counterexample-Guided Abstraction Refinement for Pattern Selection " "in Optimal Classical Planning", "https://ai.dmi.unibas.ch/papers/rovner-et-al-icaps2019.pdf", "Proceedings of the 29th International Conference on Automated " "Planning and Scheduling (ICAPS 2019)", "362-367", "AAAI Press", "2019")); add_implementation_notes_to_parser(parser); add_cegar_options_to_parser(parser); utils::add_verbosity_option_to_parser(parser); utils::add_rng_options(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; return make_shared<PatternCollectionGeneratorSingleCegar>(opts); } static Plugin<PatternCollectionGenerator> _plugin("single_cegar", _parse); }
2,390
C++
32.208333
82
0.669038
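The generate method above is the entire public surface of a pattern collection generator: it takes an AbstractTask and returns a PatternCollectionInformation. A hedged sketch of how a caller on the heuristic side typically consumes that result (option parsing omitted, the free function name is made up):

// Sketch: assumes a planner build where the pdbs headers are available.
#include "pattern_generator.h"

#include <memory>

using namespace pdbs;

std::shared_ptr<PatternCollection> compute_patterns(
    const std::shared_ptr<AbstractTask> &task,
    PatternCollectionGenerator &generator) {
    PatternCollectionInformation info = generator.generate(task);
    // The patterns are returned directly; PDBs can additionally be obtained
    // from the same object via get_pdbs() when a heuristic needs them.
    return info.get_patterns();
}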
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_collection_generator_multiple_cegar.cc
#include "pattern_collection_generator_multiple_cegar.h" #include "cegar.h" #include "pattern_database.h" #include "utils.h" #include "../option_parser.h" #include "../plugin.h" #include "../utils/countdown_timer.h" #include "../utils/markup.h" #include "../utils/rng.h" #include "../utils/rng_options.h" #include <vector> using namespace std; namespace pdbs { PatternCollectionGeneratorMultipleCegar::PatternCollectionGeneratorMultipleCegar( options::Options &opts) : max_pdb_size(opts.get<int>("max_pdb_size")), max_collection_size(opts.get<int>("max_collection_size")), use_wildcard_plans(opts.get<bool>("use_wildcard_plans")), cegar_max_time(opts.get<double>("max_time")), verbosity(opts.get<utils::Verbosity>("verbosity")), rng(utils::parse_rng_from_options(opts)), random_seed(opts.get<int>("random_seed")), stagnation_limit(opts.get<double>("stagnation_limit")), blacklist_trigger_percentage(opts.get<double>("blacklist_trigger_percentage")), enable_blacklist_on_stagnation(opts.get<bool>("enable_blacklist_on_stagnation")), total_max_time(opts.get<double>("total_max_time")), blacklisting(false), stagnation_start_time(-1), remaining_collection_size(max_collection_size) { } void PatternCollectionGeneratorMultipleCegar::check_blacklist_trigger_timer( double blacklisting_start_time, const utils::CountdownTimer &timer) { // Check if blacklisting should be started. if (!blacklisting && timer.get_elapsed_time() > blacklisting_start_time) { blacklisting = true; // Also reset stagnation timer in case it was already set. stagnation_start_time = -1; if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "given percentage of total time limit " << "exhausted; enabling blacklisting." << endl; } } } unordered_set<int> PatternCollectionGeneratorMultipleCegar::get_blacklisted_variables( vector<int> &non_goal_variables) { unordered_set<int> blacklisted_variables; if (blacklisting && !non_goal_variables.empty()) { /* Randomize the number of non-goal variables for blacklisting. We want to choose at least 1 non-goal variable and up to the entire set of non-goal variables. */ int blacklist_size = (*rng)(non_goal_variables.size()); ++blacklist_size; // [1, |non-goal variables|] rng->shuffle(non_goal_variables); blacklisted_variables.insert( non_goal_variables.begin(), non_goal_variables.begin() + blacklist_size); if (verbosity >= utils::Verbosity::DEBUG) { utils::g_log << "blacklisting " << blacklist_size << " out of " << non_goal_variables.size() << " non-goal variables: "; for (int var : blacklisted_variables) { utils::g_log << var << ", "; } utils::g_log << endl; } } return blacklisted_variables; } void PatternCollectionGeneratorMultipleCegar::handle_generated_pattern( PatternCollectionInformation &&collection_info, set<Pattern> &generated_patterns, shared_ptr<PDBCollection> &generated_pdbs, const utils::CountdownTimer &timer) { shared_ptr<PatternCollection> new_patterns = collection_info.get_patterns(); if (new_patterns->size() > 1) { cerr << "a generator computed more than one pattern" << endl; utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR); } const Pattern &pattern = new_patterns->front(); if (verbosity >= utils::Verbosity::DEBUG) { utils::g_log << "generated patterns " << pattern << endl; } if (generated_patterns.insert(pattern).second) { // CEGAR generated a new pattern. Reset stagnation_start_time. 
stagnation_start_time = -1; shared_ptr<PDBCollection> new_pdbs = collection_info.get_pdbs(); shared_ptr<PatternDatabase> &pdb = new_pdbs->front(); remaining_collection_size -= pdb->get_size(); generated_pdbs->push_back(move(pdb)); } else { // Pattern is not new. Set stagnation start time if not already set. if (stagnation_start_time == -1) { stagnation_start_time = timer.get_elapsed_time(); } } } bool PatternCollectionGeneratorMultipleCegar::collection_size_limit_reached() const { if (remaining_collection_size <= 0) { /* This value can become negative if the given size limits for pdb or collection size are so low that even the singleton goal pattern generated by CEGAR violates it. */ if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "collection size limit reached" << endl; } return true; } return false; } bool PatternCollectionGeneratorMultipleCegar::time_limit_reached( const utils::CountdownTimer &timer) const { if (timer.is_expired()) { if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "time limit reached" << endl; } return true; } return false; } bool PatternCollectionGeneratorMultipleCegar::check_for_stagnation( const utils::CountdownTimer &timer) { // Test if no new pattern was generated for longer than stagnation_limit. if (stagnation_start_time != -1 && timer.get_elapsed_time() - stagnation_start_time > stagnation_limit) { if (enable_blacklist_on_stagnation) { if (blacklisting) { if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "stagnation limit reached " << "despite blacklisting, terminating" << endl; } return true; } else { if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "stagnation limit reached, " << "enabling blacklisting" << endl; } blacklisting = true; stagnation_start_time = -1; } } else { if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "stagnation limit reached, terminating" << endl; } return true; } } return false; } PatternCollectionInformation get_pattern_collection( const TaskProxy &task_proxy, const shared_ptr<PDBCollection> &pdbs) { shared_ptr<PatternCollection> patterns = make_shared<PatternCollection>(); patterns->reserve(pdbs->size()); for (const shared_ptr<PatternDatabase> &pdb : *pdbs) { patterns->push_back(pdb->get_pattern()); } PatternCollectionInformation result(task_proxy, patterns); result.set_pdbs(pdbs); return result; } PatternCollectionInformation PatternCollectionGeneratorMultipleCegar::generate( const shared_ptr<AbstractTask> &task) { if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "Generating patterns using the multiple CEGAR algorithm" << endl; } TaskProxy task_proxy(*task); utils::CountdownTimer timer(total_max_time); // Store the set of goals in random order. vector<FactPair> goals = get_goals_in_random_order(task_proxy, *rng); // Store the non-goal variables for potential blacklisting. vector<int> non_goal_variables = get_non_goal_variables(task_proxy); if (verbosity >= utils::Verbosity::DEBUG) { utils::g_log << "goal variables: "; for (FactPair goal : goals) { utils::g_log << goal.var << ", "; } utils::g_log << endl; utils::g_log << "non-goal variables: " << non_goal_variables << endl; } // Collect all unique patterns and their PDBs. 
set<Pattern> generated_patterns; shared_ptr<PDBCollection> generated_pdbs = make_shared<PDBCollection>(); int num_iterations = 1; int goal_index = 0; const utils::Verbosity cegar_verbosity(utils::Verbosity::SILENT); shared_ptr<utils::RandomNumberGenerator> cegar_rng = make_shared<utils::RandomNumberGenerator>(random_seed); /* Start blacklisting after the percentage of total_max_time specified via blacklisting_trigger_percentage has passed. Compute this time point once. */ double blacklisting_start_time = total_max_time * blacklist_trigger_percentage; while (true) { check_blacklist_trigger_timer(blacklisting_start_time, timer); unordered_set<int> blacklisted_variables = get_blacklisted_variables(non_goal_variables); int remaining_pdb_size_for_cegar = min(remaining_collection_size, max_pdb_size); double remaining_time_for_cegar = min(static_cast<double>(timer.get_remaining_time()), cegar_max_time); /* Call CEGAR with the remaining size budget (limiting one of pdb and collection size would be enough, but this is cleaner), with the remaining time limit and an RNG instance with a different random seed in each iteration. */ CEGAR cegar( remaining_pdb_size_for_cegar, remaining_collection_size, use_wildcard_plans, remaining_time_for_cegar, cegar_verbosity, cegar_rng, task, {goals[goal_index]}, move(blacklisted_variables)); PatternCollectionInformation collection_info = cegar.compute_pattern_collection(); handle_generated_pattern( move(collection_info), generated_patterns, generated_pdbs, timer); if (collection_size_limit_reached() || time_limit_reached(timer) || check_for_stagnation(timer)) { break; } ++num_iterations; ++goal_index; goal_index = goal_index % goals.size(); assert(utils::in_bounds(goal_index, goals)); } PatternCollectionInformation result = get_pattern_collection(task_proxy, generated_pdbs); if (verbosity >= utils::Verbosity::NORMAL) { utils::g_log << "Multiple CEGAR number of iterations: " << num_iterations << endl; utils::g_log << "Multiple CEGAR average time per generator: " << timer.get_elapsed_time() / num_iterations << endl; dump_pattern_collection_generation_statistics( "Multiple CEGAR", timer.get_elapsed_time(), result); } return result; } static shared_ptr<PatternCollectionGenerator> _parse(options::OptionParser &parser) { parser.document_synopsis( "Multiple CEGAR", "This pattern collection generator implements the multiple CEGAR algorithm " "described in the paper" + utils::format_conference_reference( {"Alexander Rovner", "Silvan Sievers", "Malte Helmert"}, "Counterexample-Guided Abstraction Refinement for Pattern Selection " "in Optimal Classical Planning", "https://ai.dmi.unibas.ch/papers/rovner-et-al-icaps2019.pdf", "Proceedings of the 29th International Conference on Automated " "Planning and Scheduling (ICAPS 2019)", "362-367", "AAAI Press", "2019")); add_implementation_notes_to_parser(parser); parser.add_option<double>( "total_max_time", "maximum time in seconds for the multiple CEGAR algorithm. The " "algorithm will always execute at least one iteration, i.e., call the " "CEGAR algorithm once. This limit possibly overrides the limit " "specified for the CEGAR algorithm.", "100.0", Bounds("0.0", "infinity")); parser.add_option<double>( "stagnation_limit", "maximum time in seconds the multiple CEGAR algorithm allows without " "generating a new pattern through the CEGAR algorithm. 
The multiple " "CEGAR algorithm terminates prematurely if this limit is hit unless " "enable_blacklist_on_stagnation is enabled.", "20.0", Bounds("1.0", "infinity")); parser.add_option<double>( "blacklist_trigger_percentage", "percentage of total_max_time after which the multiple CEGAR " "algorithm enables blacklisting for diversification", "0.75", Bounds("0.0", "1.0")); parser.add_option<bool>( "enable_blacklist_on_stagnation", "If true, the multiple CEGAR algorithm will enable blacklisting " "for diversification when stagnation_limit is hit for the first time " "(unless it was already enabled due to blacklist_trigger_percentage) " "and terminate when stagnation_limit is hit for the second time.", "true"); add_cegar_options_to_parser(parser); utils::add_verbosity_option_to_parser(parser); utils::add_rng_options(parser); Options opts = parser.parse(); if (parser.dry_run()) { return nullptr; } return make_shared<PatternCollectionGeneratorMultipleCegar>(opts); } static Plugin<PatternCollectionGenerator> _plugin("multiple_cegar", _parse); }
13,256
C++
38.573134
93
0.623944
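The interplay of the three mechanisms implemented above, the time-based blacklist trigger, stagnation detection and the shrinking collection size budget, is easier to follow in isolation. The standalone sketch below reproduces only that control-flow skeleton with plain variables and a fake CEGAR call; every name and constant is illustrative and not part of the planner's API.

#include <iostream>
#include <set>

// Illustrative skeleton of the multiple-CEGAR outer loop (not the planner's API).
int main() {
    const double total_max_time = 100.0;
    const double blacklist_trigger_percentage = 0.75;
    const double stagnation_limit = 20.0;
    const int max_collection_size = 1000;

    double elapsed = 0.0;
    double stagnation_start = -1;          // -1 means "no stagnation phase running"
    bool blacklisting = false;
    int remaining_collection_size = max_collection_size;
    std::set<int> generated_patterns;      // stand-in for the set of unique patterns

    const double blacklisting_start_time = total_max_time * blacklist_trigger_percentage;
    int goal_index = 0, num_goals = 3;

    while (true) {
        elapsed += 5.0;                    // pretend each CEGAR call costs 5 seconds

        if (!blacklisting && elapsed > blacklisting_start_time) {
            blacklisting = true;           // diversify once most of the budget is gone
            stagnation_start = -1;
        }

        int new_pattern = goal_index % 2;  // fake CEGAR result; duplicates after a while
        if (generated_patterns.insert(new_pattern).second) {
            stagnation_start = -1;         // progress: reset the stagnation clock
            remaining_collection_size -= 100;
        } else if (stagnation_start == -1) {
            stagnation_start = elapsed;    // first duplicate: start the stagnation clock
        }

        if (remaining_collection_size <= 0 || elapsed >= total_max_time)
            break;
        if (stagnation_start != -1 && elapsed - stagnation_start > stagnation_limit) {
            if (blacklisting)
                break;                     // already diversifying; give up
            blacklisting = true;           // otherwise switch on blacklisting first
            stagnation_start = -1;
        }

        goal_index = (goal_index + 1) % num_goals;
    }
    std::cout << "patterns generated: " << generated_patterns.size() << std::endl;
    return 0;
}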
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_collection_generator_multiple_cegar.h
#ifndef PDBS_PATTERN_COLLECTION_GENERATOR_MULTIPLE_CEGAR_H #define PDBS_PATTERN_COLLECTION_GENERATOR_MULTIPLE_CEGAR_H #include "pattern_generator.h" #include <set> namespace options { class Options; } namespace utils { class CountdownTimer; class RandomNumberGenerator; enum class Verbosity; } namespace pdbs { class PatternCollectionGeneratorMultipleCegar : public PatternCollectionGenerator { // Options for the CEGAR algorithm. const int max_pdb_size; const int max_collection_size; const bool use_wildcard_plans; const double cegar_max_time; // Options for this generator. const utils::Verbosity verbosity; std::shared_ptr<utils::RandomNumberGenerator> rng; // We store the random seed for creating different RNG objects for CEGAR. const int random_seed; const double stagnation_limit; const double blacklist_trigger_percentage; const bool enable_blacklist_on_stagnation; const double total_max_time; // Variables used in the main loop. bool blacklisting; double stagnation_start_time; int remaining_collection_size; void check_blacklist_trigger_timer( double blacklisting_start_time, const utils::CountdownTimer &timer); std::unordered_set<int> get_blacklisted_variables( std::vector<int> &non_goal_variables); void handle_generated_pattern( PatternCollectionInformation &&collection_info, std::set<Pattern> &generated_patterns, std::shared_ptr<PDBCollection> &generated_pdbs, const utils::CountdownTimer &timer); bool collection_size_limit_reached() const; bool time_limit_reached(const utils::CountdownTimer &timer) const; bool check_for_stagnation(const utils::CountdownTimer &timer); public: explicit PatternCollectionGeneratorMultipleCegar(options::Options &opts); virtual ~PatternCollectionGeneratorMultipleCegar() = default; virtual PatternCollectionInformation generate( const std::shared_ptr<AbstractTask> &task) override; }; } #endif
2,023
C
31.126984
83
0.742462
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_cliques.cc
#include "pattern_cliques.h" #include "pattern_database.h" #include "../task_proxy.h" #include "../algorithms/max_cliques.h" using namespace std; namespace pdbs { bool are_patterns_additive(const Pattern &pattern1, const Pattern &pattern2, const VariableAdditivity &are_additive) { for (int v1 : pattern1) { for (int v2 : pattern2) { if (!are_additive[v1][v2]) { return false; } } } return true; } VariableAdditivity compute_additive_vars(const TaskProxy &task_proxy) { VariableAdditivity are_additive; int num_vars = task_proxy.get_variables().size(); are_additive.resize(num_vars, vector<bool>(num_vars, true)); for (OperatorProxy op : task_proxy.get_operators()) { for (EffectProxy e1 : op.get_effects()) { for (EffectProxy e2 : op.get_effects()) { int e1_var_id = e1.get_fact().get_variable().get_id(); int e2_var_id = e2.get_fact().get_variable().get_id(); are_additive[e1_var_id][e2_var_id] = false; } } } return are_additive; } shared_ptr<vector<PatternClique>> compute_pattern_cliques( const PatternCollection &patterns, const VariableAdditivity &are_additive) { // Initialize compatibility graph. vector<vector<int>> cgraph; cgraph.resize(patterns.size()); for (size_t i = 0; i < patterns.size(); ++i) { for (size_t j = i + 1; j < patterns.size(); ++j) { if (are_patterns_additive(patterns[i], patterns[j], are_additive)) { /* If the two patterns are additive, there is an edge in the compatibility graph. */ cgraph[i].push_back(j); cgraph[j].push_back(i); } } } shared_ptr<vector<PatternClique>> max_cliques = make_shared<vector<PatternClique>>(); max_cliques::compute_max_cliques(cgraph, *max_cliques); return max_cliques; } vector<PatternClique> compute_pattern_cliques_with_pattern( const PatternCollection &patterns, const vector<PatternClique> &known_pattern_cliques, const Pattern &new_pattern, const VariableAdditivity &are_additive) { vector<PatternClique> cliques_additive_with_pattern; for (const PatternClique &known_clique : known_pattern_cliques) { // Take all patterns which are additive to new_pattern. PatternClique new_clique; new_clique.reserve(known_clique.size()); for (PatternID pattern_id : known_clique) { if (are_patterns_additive( new_pattern, patterns[pattern_id], are_additive)) { new_clique.push_back(pattern_id); } } if (!new_clique.empty()) { cliques_additive_with_pattern.push_back(new_clique); } } if (cliques_additive_with_pattern.empty()) { // If nothing was additive with the new variable, then // the only clique is the empty set. cliques_additive_with_pattern.emplace_back(); } return cliques_additive_with_pattern; } }
3,146
C++
33.582417
89
0.601399
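The additivity test above reduces to: two patterns are additive iff no single operator has effects on a variable from each of them. The standalone sketch below recomputes are_additive for a toy task in which operators are given simply as lists of affected variables, then checks two pattern pairs; plain std containers stand in for the planner's proxy classes.

#include <iostream>
#include <vector>

using Pattern = std::vector<int>;

int main() {
    const int num_vars = 4;
    // Each operator is listed by the variables it has effects on.
    std::vector<std::vector<int>> operator_effect_vars = {{0, 1}, {2}, {3}};

    // are_additive[v1][v2] == false iff some operator affects both v1 and v2.
    std::vector<std::vector<bool>> are_additive(
        num_vars, std::vector<bool>(num_vars, true));
    for (const std::vector<int> &effects : operator_effect_vars)
        for (int v1 : effects)
            for (int v2 : effects)
                are_additive[v1][v2] = false;

    auto patterns_additive = [&](const Pattern &p1, const Pattern &p2) {
        for (int v1 : p1)
            for (int v2 : p2)
                if (!are_additive[v1][v2])
                    return false;
        return true;
    };

    Pattern p1 = {0, 2};
    Pattern p2 = {1};   // not additive with p1: operator {0, 1} affects 0 and 1
    Pattern p3 = {3};   // additive with p1: no operator touches 3 together with 0 or 2
    std::cout << patterns_additive(p1, p2) << " "
              << patterns_additive(p1, p3) << std::endl;   // prints "0 1"
    return 0;
}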
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_generator.h
#ifndef PDBS_PATTERN_GENERATOR_H #define PDBS_PATTERN_GENERATOR_H #include "pattern_collection_information.h" #include "pattern_information.h" #include "types.h" #include <memory> class AbstractTask; namespace pdbs { class PatternCollectionGenerator { public: virtual ~PatternCollectionGenerator() = default; virtual PatternCollectionInformation generate( const std::shared_ptr<AbstractTask> &task) = 0; }; class PatternGenerator { public: virtual ~PatternGenerator() = default; virtual PatternInformation generate(const std::shared_ptr<AbstractTask> &task) = 0; }; } #endif
607
C
19.266666
87
0.749588
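For illustration, a minimal generator implementing this interface might return one singleton pattern per goal variable, which is the same seed collection the hill-climbing generator in this directory starts from. The class name below is made up; the PatternCollectionInformation constructor and the proxy calls follow the usage visible in the other files of this collection.

// Sketch of a custom PatternCollectionGenerator (class name is illustrative).
#include "pattern_generator.h"

#include "../task_proxy.h"

#include <memory>

namespace pdbs {
class GoalSingletonsGenerator : public PatternCollectionGenerator {
public:
    virtual PatternCollectionInformation generate(
        const std::shared_ptr<AbstractTask> &task) override {
        TaskProxy task_proxy(*task);
        std::shared_ptr<PatternCollection> patterns =
            std::make_shared<PatternCollection>();
        // One pattern of size one for every goal variable.
        for (FactProxy goal : task_proxy.get_goals())
            patterns->emplace_back(1, goal.get_variable().get_id());
        return PatternCollectionInformation(task_proxy, patterns);
    }
};
}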
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_information.h
#ifndef PDBS_PATTERN_INFORMATION_H #define PDBS_PATTERN_INFORMATION_H #include "types.h" #include "../task_proxy.h" #include <memory> namespace pdbs { /* This class is a wrapper for a pair of a pattern and the corresponding PDB. It always contains a pattern and can contain the computed PDB. If the latter is not set, it is computed on demand. Ownership of the information is shared between the creators of this class (usually PatternGenerators), the class itself, and its users (consumers of patterns like heuristics). TODO: consider using this class not for shared ownership but for actual ownership transfer, from the generator to the user. */ class PatternInformation { TaskProxy task_proxy; Pattern pattern; std::shared_ptr<PatternDatabase> pdb; void create_pdb_if_missing(); bool information_is_valid() const; public: PatternInformation(const TaskProxy &task_proxy, Pattern pattern); void set_pdb(const std::shared_ptr<PatternDatabase> &pdb); TaskProxy get_task_proxy() const { return task_proxy; } const Pattern &get_pattern() const; std::shared_ptr<PatternDatabase> get_pdb(); }; } #endif
1,176
C
25.155555
78
0.72619
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_collection_generator_combo.h
#ifndef PDBS_PATTERN_COLLECTION_GENERATOR_COMBO_H #define PDBS_PATTERN_COLLECTION_GENERATOR_COMBO_H #include "pattern_generator.h" namespace pdbs { /* Take one large pattern and then single-variable patterns for all goal variables that are not in the large pattern. */ class PatternCollectionGeneratorCombo : public PatternCollectionGenerator { int max_states; public: explicit PatternCollectionGeneratorCombo(const options::Options &opts); virtual ~PatternCollectionGeneratorCombo() = default; virtual PatternCollectionInformation generate( const std::shared_ptr<AbstractTask> &task) override; }; } #endif
638
C
29.42857
75
0.778997
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/zero_one_pdbs_heuristic.cc
#include "zero_one_pdbs_heuristic.h" #include "pattern_generator.h" #include "../option_parser.h" #include "../plugin.h" #include <limits> using namespace std; namespace pdbs { ZeroOnePDBs get_zero_one_pdbs_from_options( const shared_ptr<AbstractTask> &task, const Options &opts) { shared_ptr<PatternCollectionGenerator> pattern_generator = opts.get<shared_ptr<PatternCollectionGenerator>>("patterns"); PatternCollectionInformation pattern_collection_info = pattern_generator->generate(task); shared_ptr<PatternCollection> patterns = pattern_collection_info.get_patterns(); TaskProxy task_proxy(*task); return ZeroOnePDBs(task_proxy, *patterns); } ZeroOnePDBsHeuristic::ZeroOnePDBsHeuristic( const options::Options &opts) : Heuristic(opts), zero_one_pdbs(get_zero_one_pdbs_from_options(task, opts)) { } int ZeroOnePDBsHeuristic::compute_heuristic(const State &ancestor_state) { State state = convert_ancestor_state(ancestor_state); int h = zero_one_pdbs.get_value(state); if (h == numeric_limits<int>::max()) return DEAD_END; return h; } static shared_ptr<Heuristic> _parse(OptionParser &parser) { parser.document_synopsis( "Zero-One PDB", "The zero/one pattern database heuristic is simply the sum of the " "heuristic values of all patterns in the pattern collection. In contrast " "to the canonical pattern database heuristic, there is no need to check " "for additive subsets, because the additivity of the patterns is " "guaranteed by action cost partitioning. This heuristic uses the most " "simple form of action cost partitioning, i.e. if an operator affects " "more than one pattern in the collection, its costs are entirely taken " "into account for one pattern (the first one which it affects) and set " "to zero for all other affected patterns."); parser.document_language_support("action costs", "supported"); parser.document_language_support("conditional effects", "not supported"); parser.document_language_support("axioms", "not supported"); parser.document_property("admissible", "yes"); parser.document_property("consistent", "yes"); parser.document_property("safe", "yes"); parser.document_property("preferred operators", "no"); parser.add_option<shared_ptr<PatternCollectionGenerator>>( "patterns", "pattern generation method", "systematic(1)"); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; return make_shared<ZeroOnePDBsHeuristic>(opts); } static Plugin<Evaluator> _plugin("zopdbs", _parse, "heuristics_pdb"); }
2,760
C++
36.31081
82
0.698551
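As the documentation string above states, the zero-one PDB heuristic is simply the sum of the individual PDB values, with any dead-end value making the whole state a dead end. A standalone sketch of that aggregation, with precomputed integers standing in for the PDB lookups and numeric_limits<int>::max() representing infinity as in the planner:

#include <iostream>
#include <limits>
#include <vector>

// Zero-one cost partitioning: the values of the individual PDBs are simply
// summed, and a dead end in any PDB makes the whole state a dead end.
int zero_one_sum(const std::vector<int> &pdb_values) {
    const int infinity = std::numeric_limits<int>::max();
    int h = 0;
    for (int value : pdb_values) {
        if (value == infinity)
            return infinity;
        h += value;
    }
    return h;
}

int main() {
    std::cout << zero_one_sum({3, 0, 5}) << std::endl;   // 8
    std::cout << (zero_one_sum({3, std::numeric_limits<int>::max()}) ==
                  std::numeric_limits<int>::max()) << std::endl;   // 1 (dead end)
    return 0;
}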
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/zero_one_pdbs.h
#ifndef PDBS_ZERO_ONE_PDBS_H #define PDBS_ZERO_ONE_PDBS_H #include "types.h" class State; class TaskProxy; namespace pdbs { class ZeroOnePDBs { PDBCollection pattern_databases; public: ZeroOnePDBs(const TaskProxy &task_proxy, const PatternCollection &patterns); ~ZeroOnePDBs() = default; int get_value(const State &state) const; /* Returns the sum of all mean finite h-values of every PDB. This is an approximation of the real mean finite h-value of the Heuristic, because dead-ends are ignored for the computation of the mean finite h-values for a PDB. As a consequence, if different PDBs have different states which are dead-end, we do not calculate the real mean h-value for these states. */ double compute_approx_mean_finite_h() const; void dump() const; }; } #endif
844
C
26.258064
80
0.707346
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_database.h
#ifndef PDBS_PATTERN_DATABASE_H #define PDBS_PATTERN_DATABASE_H #include "types.h" #include "../task_proxy.h" #include <utility> #include <vector> namespace utils { class RandomNumberGenerator; } namespace pdbs { class AbstractOperator { /* This class represents an abstract operator how it is needed for the regression search performed during the PDB-construction. As all abstract states are represented as a number, abstract operators don't have "usual" effects but "hash effects", i.e. the change (as number) the abstract operator implies on a given abstract state. */ int concrete_op_id; int cost; /* Preconditions for the regression search, corresponds to normal effects and prevail of concrete operators. */ std::vector<FactPair> regression_preconditions; /* Effect of the operator during regression search on a given abstract state number. */ int hash_effect; public: /* Abstract operators are built from concrete operators. The parameters follow the usual name convention of SAS+ operators, meaning prevail, preconditions and effects are all related to progression search. */ AbstractOperator(const std::vector<FactPair> &prevail, const std::vector<FactPair> &preconditions, const std::vector<FactPair> &effects, int cost, const std::vector<int> &hash_multipliers, int concrete_op_id); ~AbstractOperator(); /* Returns variable value pairs which represent the preconditions of the abstract operator in a regression search */ const std::vector<FactPair> &get_regression_preconditions() const { return regression_preconditions; } /* Returns the effect of the abstract operator in form of a value change (+ or -) to an abstract state index */ int get_hash_effect() const {return hash_effect;} int get_concrete_op_id() const { return concrete_op_id; } /* Returns the cost of the abstract operator (same as the cost of the original concrete operator) */ int get_cost() const {return cost;} void dump(const Pattern &pattern, const VariablesProxy &variables) const; }; // Implements a single pattern database class PatternDatabase { Pattern pattern; // size of the PDB int num_states; /* final h-values for abstract-states. dead-ends are represented by numeric_limits<int>::max() */ std::vector<int> distances; std::vector<int> generating_op_ids; std::vector<std::vector<OperatorID>> wildcard_plan; // multipliers for each variable for perfect hash function std::vector<int> hash_multipliers; /* Recursive method; called by build_abstract_operators. In the case of a precondition with value = -1 in the concrete operator, all multiplied out abstract operators are computed, i.e. for all possible values of the variable (with precondition = -1), one abstract operator with a concrete value (!= -1) is computed. */ void multiply_out( int pos, int cost, std::vector<FactPair> &prev_pairs, std::vector<FactPair> &pre_pairs, std::vector<FactPair> &eff_pairs, const std::vector<FactPair> &effects_without_pre, const VariablesProxy &variables, int concrete_op_id, std::vector<AbstractOperator> &operators); /* Computes all abstract operators for a given concrete operator (by its global operator number). Initializes data structures for initial call to recursive method multiply_out. variable_to_index maps variables in the task to their index in the pattern or -1. 
*/ void build_abstract_operators( const OperatorProxy &op, int cost, const std::vector<int> &variable_to_index, const VariablesProxy &variables, std::vector<AbstractOperator> &operators); /* Computes all abstract operators, builds the match tree (successor generator) and then does a Dijkstra regression search to compute all final h-values (stored in distances). operator_costs can specify individual operator costs for each operator for action cost partitioning. If left empty, default operator costs are used. */ void create_pdb( const TaskProxy &task_proxy, const std::vector<int> &operator_costs, bool compute_plan, const std::shared_ptr<utils::RandomNumberGenerator> &rng, bool compute_wildcard_plan); /* For a given abstract state (given as index), the according values for each variable in the state are computed and compared with the given pairs of goal variables and values. Returns true iff the state is a goal state. */ bool is_goal_state( int state_index, const std::vector<FactPair> &abstract_goals, const VariablesProxy &variables) const; /* The given concrete state is used to calculate the index of the according abstract state. This is only used for table lookup (distances) during search. */ int hash_index(const std::vector<int> &state) const; public: /* Important: It is assumed that the pattern (passed via Options) is sorted, contains no duplicates and is small enough so that the number of abstract states is below numeric_limits<int>::max() Parameters: dump: If set to true, prints the construction time. operator_costs: Can specify individual operator costs for each operator. This is useful for action cost partitioning. If left empty, default operator costs are used. compute_plan: if true, compute an optimal plan when computing distances of the PDB. This requires a RNG object passed via rng. compute_wildcard_plan: when computing a plan (see compute_plan), compute a wildcard plan, i.e., a sequence of parallel operators inducing an optimal plan. Otherwise, compute a simple plan (a sequence of operators). */ PatternDatabase( const TaskProxy &task_proxy, const Pattern &pattern, bool dump = false, const std::vector<int> &operator_costs = std::vector<int>(), bool compute_plan = false, const std::shared_ptr<utils::RandomNumberGenerator> &rng = nullptr, bool compute_wildcard_plan = false); ~PatternDatabase() = default; int get_value(const std::vector<int> &state) const; // Returns the pattern (i.e. all variables used) of the PDB const Pattern &get_pattern() const { return pattern; } // Returns the size (number of abstract states) of the PDB int get_size() const { return num_states; } std::vector<std::vector<OperatorID>> && extract_wildcard_plan() { return std::move(wildcard_plan); }; /* Returns the average h-value over all states, where dead-ends are ignored (they neither increase the sum of all h-values nor the number of entries for the mean value calculation). If all states are dead-ends, infinity is returned. Note: This is only calculated when called; avoid repeated calls to this method! */ double compute_mean_finite_h() const; // Returns true iff op has an effect on a variable in the pattern. bool is_operator_relevant(const OperatorProxy &op) const; }; } #endif
7,550
C
33.479452
80
0.664503
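The comments in this header describe the perfect hash function used to index the PDB table: each pattern variable gets a multiplier equal to the product of the domain sizes of the preceding pattern variables, and the index of a state is the multiplier-weighted sum of its values. The standalone sketch below computes the multipliers, an example index and the constant "hash effect" of changing one variable's value; the numbers are made up for illustration.

#include <iostream>
#include <vector>

int main() {
    // Pattern over three variables with domain sizes 2, 3, 2 -> 12 abstract states.
    std::vector<int> domain_sizes = {2, 3, 2};

    // hash_multipliers[i] = product of the domain sizes of the variables before i.
    std::vector<int> hash_multipliers;
    int num_states = 1;
    for (int size : domain_sizes) {
        hash_multipliers.push_back(num_states);
        num_states *= size;
    }
    // hash_multipliers = {1, 2, 6}, num_states = 12

    // Index of the abstract state where the pattern variables have values {1, 2, 0}.
    std::vector<int> values = {1, 2, 0};
    int index = 0;
    for (size_t i = 0; i < values.size(); ++i)
        index += hash_multipliers[i] * values[i];
    std::cout << "index: " << index << std::endl;   // 1*1 + 2*2 + 6*0 = 5

    // "Hash effect" of an operator that changes variable 1 from value 2 to value 0:
    // applying it just adds this constant to the state index.
    int hash_effect = hash_multipliers[1] * (0 - 2);
    std::cout << "successor index: " << index + hash_effect << std::endl;   // 1
    return 0;
}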
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/canonical_pdbs.h
#ifndef PDBS_CANONICAL_PDBS_H #define PDBS_CANONICAL_PDBS_H #include "types.h" #include <memory> class State; namespace pdbs { class CanonicalPDBs { std::shared_ptr<PDBCollection> pdbs; std::shared_ptr<std::vector<PatternClique>> pattern_cliques; public: CanonicalPDBs( const std::shared_ptr<PDBCollection> &pdbs, const std::shared_ptr<std::vector<PatternClique>> &pattern_cliques); ~CanonicalPDBs() = default; int get_value(const State &state) const; }; } #endif
507
C
18.538461
76
0.698225
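Given the pattern cliques from pattern_cliques.cc, the canonical heuristic value is the maximum over all cliques of the sum of the PDB values inside the clique, with dead ends treated as infinity. A standalone sketch of that final aggregation step, with a plain vector of precomputed values standing in for the PDB lookups:

#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>

using PatternClique = std::vector<int>;   // indices into the PDB collection

// Canonical combination: maximize over cliques, sum within a clique.
int canonical_value(const std::vector<int> &pdb_values,
                    const std::vector<PatternClique> &cliques) {
    const int infinity = std::numeric_limits<int>::max();
    int best = 0;
    for (const PatternClique &clique : cliques) {
        int sum = 0;
        for (int pdb_id : clique) {
            if (pdb_values[pdb_id] == infinity)
                return infinity;           // a dead end in any used PDB
            sum += pdb_values[pdb_id];
        }
        best = std::max(best, sum);
    }
    return best;
}

int main() {
    // Three PDBs; PDBs 0 and 1 are additive, PDB 2 forms its own clique.
    std::vector<int> pdb_values = {4, 3, 6};
    std::vector<PatternClique> cliques = {{0, 1}, {2}};
    std::cout << canonical_value(pdb_values, cliques) << std::endl;   // max(7, 6) = 7
    return 0;
}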
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/zero_one_pdbs_heuristic.h
#ifndef PDBS_ZERO_ONE_PDBS_HEURISTIC_H #define PDBS_ZERO_ONE_PDBS_HEURISTIC_H #include "zero_one_pdbs.h" #include "../heuristic.h" namespace pdbs { class PatternDatabase; class ZeroOnePDBsHeuristic : public Heuristic { ZeroOnePDBs zero_one_pdbs; protected: virtual int compute_heuristic(const State &ancestor_state) override; public: ZeroOnePDBsHeuristic(const options::Options &opts); virtual ~ZeroOnePDBsHeuristic() = default; }; } #endif
462
C
20.045454
72
0.751082
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_collection_generator_combo.cc
#include "pattern_collection_generator_combo.h" #include "pattern_generator_greedy.h" #include "utils.h" #include "validation.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_proxy.h" #include "../utils/logging.h" #include "../utils/timer.h" #include <iostream> #include <memory> #include <set> using namespace std; namespace pdbs { PatternCollectionGeneratorCombo::PatternCollectionGeneratorCombo(const Options &opts) : max_states(opts.get<int>("max_states")) { } PatternCollectionInformation PatternCollectionGeneratorCombo::generate( const shared_ptr<AbstractTask> &task) { utils::Timer timer; utils::g_log << "Generating patterns using the combo generator..." << endl; TaskProxy task_proxy(*task); shared_ptr<PatternCollection> patterns = make_shared<PatternCollection>(); PatternGeneratorGreedy large_pattern_generator(max_states); const Pattern &large_pattern = large_pattern_generator.generate(task).get_pattern(); patterns->push_back(large_pattern); set<int> used_vars(large_pattern.begin(), large_pattern.end()); for (FactProxy goal : task_proxy.get_goals()) { int goal_var_id = goal.get_variable().get_id(); if (!used_vars.count(goal_var_id)) patterns->emplace_back(1, goal_var_id); } PatternCollectionInformation pci(task_proxy, patterns); dump_pattern_collection_generation_statistics( "Combo generator", timer(), pci); return pci; } static shared_ptr<PatternCollectionGenerator> _parse(OptionParser &parser) { parser.add_option<int>( "max_states", "maximum abstraction size for combo strategy", "1000000", Bounds("1", "infinity")); Options opts = parser.parse(); if (parser.dry_run()) return nullptr; return make_shared<PatternCollectionGeneratorCombo>(opts); } static Plugin<PatternCollectionGenerator> _plugin("combo", _parse); }
1,936
C++
28.8
88
0.695764
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_information.cc
#include "pattern_information.h" #include "pattern_database.h" #include "validation.h" #include <cassert> using namespace std; namespace pdbs { PatternInformation::PatternInformation( const TaskProxy &task_proxy, Pattern pattern) : task_proxy(task_proxy), pattern(move(pattern)), pdb(nullptr) { validate_and_normalize_pattern(task_proxy, this->pattern); } bool PatternInformation::information_is_valid() const { return !pdb || pdb->get_pattern() == pattern; } void PatternInformation::create_pdb_if_missing() { if (!pdb) { pdb = make_shared<PatternDatabase>(task_proxy, pattern); } } void PatternInformation::set_pdb(const shared_ptr<PatternDatabase> &pdb_) { pdb = pdb_; assert(information_is_valid()); } const Pattern &PatternInformation::get_pattern() const { return pattern; } shared_ptr<PatternDatabase> PatternInformation::get_pdb() { create_pdb_if_missing(); return pdb; } }
959
C++
20.818181
75
0.691345
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_collection_generator_hillclimbing.cc
#include "pattern_collection_generator_hillclimbing.h" #include "canonical_pdbs_heuristic.h" #include "incremental_canonical_pdbs.h" #include "pattern_database.h" #include "utils.h" #include "validation.h" #include "../option_parser.h" #include "../plugin.h" #include "../task_utils/causal_graph.h" #include "../task_utils/sampling.h" #include "../task_utils/task_properties.h" #include "../utils/collections.h" #include "../utils/countdown_timer.h" #include "../utils/logging.h" #include "../utils/markup.h" #include "../utils/math.h" #include "../utils/memory.h" #include "../utils/rng.h" #include "../utils/rng_options.h" #include "../utils/timer.h" #include <algorithm> #include <cassert> #include <iostream> #include <limits> using namespace std; namespace pdbs { /* Since this exception class is only used for control flow and thus has no need for an error message, we use a standalone class instead of inheriting from utils::Exception. */ class HillClimbingTimeout { }; static vector<int> get_goal_variables(const TaskProxy &task_proxy) { vector<int> goal_vars; GoalsProxy goals = task_proxy.get_goals(); goal_vars.reserve(goals.size()); for (FactProxy goal : goals) { goal_vars.push_back(goal.get_variable().get_id()); } assert(utils::is_sorted_unique(goal_vars)); return goal_vars; } /* When growing a pattern, we only want to consider successor patterns that are *interesting*. A pattern is interesting if the subgraph of the causal graph induced by the pattern satisfies the following two properties: A. it is weakly connected (considering all kinds of arcs) B. from every variable in the pattern, a goal variable is reachable by a path that only uses pre->eff arcs We can use the assumption that the pattern we want to extend is already interesting, so the question is how an interesting pattern can be obtained from an interesting pattern by adding one variable. There are two ways to do this: 1. Add a *predecessor* of an existing variable along a pre->eff arc. 2. Add any *goal variable* that is a weakly connected neighbour of an existing variable (using any kind of arc). Note that in the iPDB paper, the second case was missed. Adding it significantly helps with performance in our experiments (see issue743, msg6595). In our implementation, for efficiency we replace condition 2. by only considering causal graph *successors* (along either pre->eff or eff--eff arcs), because these can be obtained directly, and the missing case (predecessors along pre->eff arcs) is already covered by the first condition anyway. This method precomputes all variables which satisfy conditions 1. or 2. for a given neighbour variable already in the pattern. */ static vector<vector<int>> compute_relevant_neighbours(const TaskProxy &task_proxy) { const causal_graph::CausalGraph &causal_graph = task_proxy.get_causal_graph(); const vector<int> goal_vars = get_goal_variables(task_proxy); vector<vector<int>> connected_vars_by_variable; VariablesProxy variables = task_proxy.get_variables(); connected_vars_by_variable.reserve(variables.size()); for (VariableProxy var : variables) { int var_id = var.get_id(); // Consider variables connected backwards via pre->eff arcs. const vector<int> &pre_to_eff_predecessors = causal_graph.get_eff_to_pre(var_id); // Consider goal variables connected (forwards) via eff--eff and pre->eff arcs. 
const vector<int> &causal_graph_successors = causal_graph.get_successors(var_id); vector<int> goal_variable_successors; set_intersection( causal_graph_successors.begin(), causal_graph_successors.end(), goal_vars.begin(), goal_vars.end(), back_inserter(goal_variable_successors)); // Combine relevant goal and non-goal variables. vector<int> relevant_neighbours; set_union( pre_to_eff_predecessors.begin(), pre_to_eff_predecessors.end(), goal_variable_successors.begin(), goal_variable_successors.end(), back_inserter(relevant_neighbours)); connected_vars_by_variable.push_back(move(relevant_neighbours)); } return connected_vars_by_variable; } PatternCollectionGeneratorHillclimbing::PatternCollectionGeneratorHillclimbing(const Options &opts) : pdb_max_size(opts.get<int>("pdb_max_size")), collection_max_size(opts.get<int>("collection_max_size")), num_samples(opts.get<int>("num_samples")), min_improvement(opts.get<int>("min_improvement")), max_time(opts.get<double>("max_time")), rng(utils::parse_rng_from_options(opts)), num_rejected(0), hill_climbing_timer(0) { } int PatternCollectionGeneratorHillclimbing::generate_candidate_pdbs( const TaskProxy &task_proxy, const vector<vector<int>> &relevant_neighbours, const PatternDatabase &pdb, set<Pattern> &generated_patterns, PDBCollection &candidate_pdbs) { const Pattern &pattern = pdb.get_pattern(); int pdb_size = pdb.get_size(); int max_pdb_size = 0; for (int pattern_var : pattern) { assert(utils::in_bounds(pattern_var, relevant_neighbours)); const vector<int> &connected_vars = relevant_neighbours[pattern_var]; // Only use variables which are not already in the pattern. vector<int> relevant_vars; set_difference( connected_vars.begin(), connected_vars.end(), pattern.begin(), pattern.end(), back_inserter(relevant_vars)); for (int rel_var_id : relevant_vars) { VariableProxy rel_var = task_proxy.get_variables()[rel_var_id]; int rel_var_size = rel_var.get_domain_size(); if (utils::is_product_within_limit(pdb_size, rel_var_size, pdb_max_size)) { Pattern new_pattern(pattern); new_pattern.push_back(rel_var_id); sort(new_pattern.begin(), new_pattern.end()); if (!generated_patterns.count(new_pattern)) { /* If we haven't seen this pattern before, generate a PDB for it and add it to candidate_pdbs if its size does not surpass the size limit. */ generated_patterns.insert(new_pattern); candidate_pdbs.push_back( make_shared<PatternDatabase>(task_proxy, new_pattern)); max_pdb_size = max(max_pdb_size, candidate_pdbs.back()->get_size()); } } else { ++num_rejected; } } } return max_pdb_size; } void PatternCollectionGeneratorHillclimbing::sample_states( const sampling::RandomWalkSampler &sampler, int init_h, vector<State> &samples) { assert(samples.empty()); samples.reserve(num_samples); for (int i = 0; i < num_samples; ++i) { samples.push_back(sampler.sample_state( init_h, [this](const State &state) { return current_pdbs->is_dead_end(state); })); if (hill_climbing_timer->is_expired()) { throw HillClimbingTimeout(); } } } pair<int, int> PatternCollectionGeneratorHillclimbing::find_best_improving_pdb( const vector<State> &samples, const vector<int> &samples_h_values, PDBCollection &candidate_pdbs) { /* TODO: The original implementation by Haslum et al. uses A* to compute h values for the sample states only instead of generating all PDBs. improvement: best improvement (= highest count) for a pattern so far. We require that a pattern must have an improvement of at least one in order to be taken into account. 
*/ int improvement = 0; int best_pdb_index = -1; // Iterate over all candidates and search for the best improving pattern/pdb for (size_t i = 0; i < candidate_pdbs.size(); ++i) { if (hill_climbing_timer->is_expired()) throw HillClimbingTimeout(); const shared_ptr<PatternDatabase> &pdb = candidate_pdbs[i]; if (!pdb) { /* candidate pattern is too large or has already been added to the canonical heuristic. */ continue; } /* If a candidate's size added to the current collection's size exceeds the maximum collection size, then forget the pdb. */ int combined_size = current_pdbs->get_size() + pdb->get_size(); if (combined_size > collection_max_size) { candidate_pdbs[i] = nullptr; continue; } /* Calculate the "counting approximation" for all sample states: count the number of samples for which the current pattern collection heuristic would be improved if the new pattern was included into it. */ /* TODO: The original implementation by Haslum et al. uses m/t as a statistical confidence interval to stop the A*-search (which they use, see above) earlier. */ int count = 0; vector<PatternClique> pattern_cliques = current_pdbs->get_pattern_cliques(pdb->get_pattern()); for (int sample_id = 0; sample_id < num_samples; ++sample_id) { const State &sample = samples[sample_id]; assert(utils::in_bounds(sample_id, samples_h_values)); int h_collection = samples_h_values[sample_id]; if (is_heuristic_improved( *pdb, sample, h_collection, *current_pdbs->get_pattern_databases(), pattern_cliques)) { ++count; } } if (count > improvement) { improvement = count; best_pdb_index = i; } if (count > 0) { utils::g_log << "pattern: " << candidate_pdbs[i]->get_pattern() << " - improvement: " << count << endl; } } return make_pair(improvement, best_pdb_index); } bool PatternCollectionGeneratorHillclimbing::is_heuristic_improved( const PatternDatabase &pdb, const State &sample, int h_collection, const PDBCollection &pdbs, const vector<PatternClique> &pattern_cliques) { const vector<int> &sample_data = sample.get_unpacked_values(); // h_pattern: h-value of the new pattern int h_pattern = pdb.get_value(sample_data); if (h_pattern == numeric_limits<int>::max()) { return true; } // h_collection: h-value of the current collection heuristic if (h_collection == numeric_limits<int>::max()) return false; vector<int> h_values; h_values.reserve(pdbs.size()); for (const shared_ptr<PatternDatabase> &p : pdbs) { int h = p->get_value(sample_data); if (h == numeric_limits<int>::max()) return false; h_values.push_back(h); } for (const PatternClique &clilque : pattern_cliques) { int h_clique = 0; for (PatternID pattern_id : clilque) { h_clique += h_values[pattern_id]; } if (h_pattern + h_clique > h_collection) { /* return true if a pattern clique is found for which the condition is met */ return true; } } return false; } void PatternCollectionGeneratorHillclimbing::hill_climbing( const TaskProxy &task_proxy) { hill_climbing_timer = new utils::CountdownTimer(max_time); utils::g_log << "Average operator cost: " << task_properties::get_average_operator_cost(task_proxy) << endl; const vector<vector<int>> relevant_neighbours = compute_relevant_neighbours(task_proxy); // Candidate patterns generated so far (used to avoid duplicates). set<Pattern> generated_patterns; // The PDBs for the patterns in generated_patterns that satisfy the size // limit to avoid recomputation. PDBCollection candidate_pdbs; // The maximum size over all PDBs in candidate_pdbs. 
int max_pdb_size = 0; for (const shared_ptr<PatternDatabase> &current_pdb : *(current_pdbs->get_pattern_databases())) { int new_max_pdb_size = generate_candidate_pdbs( task_proxy, relevant_neighbours, *current_pdb, generated_patterns, candidate_pdbs); max_pdb_size = max(max_pdb_size, new_max_pdb_size); } /* NOTE: The initial set of candidate patterns (in generated_patterns) is guaranteed to be "normalized" in the sense that there are no duplicates and patterns are sorted. */ utils::g_log << "Done calculating initial candidate PDBs" << endl; int num_iterations = 0; State initial_state = task_proxy.get_initial_state(); sampling::RandomWalkSampler sampler(task_proxy, *rng); vector<State> samples; vector<int> samples_h_values; try { while (true) { ++num_iterations; int init_h = current_pdbs->get_value(initial_state); utils::g_log << "current collection size is " << current_pdbs->get_size() << endl; utils::g_log << "current initial h value: "; if (current_pdbs->is_dead_end(initial_state)) { utils::g_log << "infinite => stopping hill climbing" << endl; break; } else { utils::g_log << init_h << endl; } samples.clear(); samples_h_values.clear(); sample_states(sampler, init_h, samples); for (const State &sample : samples) { samples_h_values.push_back(current_pdbs->get_value(sample)); } pair<int, int> improvement_and_index = find_best_improving_pdb(samples, samples_h_values, candidate_pdbs); int improvement = improvement_and_index.first; int best_pdb_index = improvement_and_index.second; if (improvement < min_improvement) { utils::g_log << "Improvement below threshold. Stop hill climbing." << endl; break; } // Add the best PDB to the CanonicalPDBsHeuristic. assert(best_pdb_index != -1); const shared_ptr<PatternDatabase> &best_pdb = candidate_pdbs[best_pdb_index]; const Pattern &best_pattern = best_pdb->get_pattern(); utils::g_log << "found a better pattern with improvement " << improvement << endl; utils::g_log << "pattern: " << best_pattern << endl; current_pdbs->add_pdb(best_pdb); // Generate candidate patterns and PDBs for next iteration. int new_max_pdb_size = generate_candidate_pdbs( task_proxy, relevant_neighbours, *best_pdb, generated_patterns, candidate_pdbs); max_pdb_size = max(max_pdb_size, new_max_pdb_size); // Remove the added PDB from candidate_pdbs. candidate_pdbs[best_pdb_index] = nullptr; utils::g_log << "Hill climbing time so far: " << hill_climbing_timer->get_elapsed_time() << endl; } } catch (HillClimbingTimeout &) { utils::g_log << "Time limit reached. Abort hill climbing." << endl; } utils::g_log << "Hill climbing iterations: " << num_iterations << endl; utils::g_log << "Hill climbing generated patterns: " << generated_patterns.size() << endl; utils::g_log << "Hill climbing rejected patterns: " << num_rejected << endl; utils::g_log << "Hill climbing maximum PDB size: " << max_pdb_size << endl; utils::g_log << "Hill climbing time: " << hill_climbing_timer->get_elapsed_time() << endl; delete hill_climbing_timer; hill_climbing_timer = nullptr; } PatternCollectionInformation PatternCollectionGeneratorHillclimbing::generate( const shared_ptr<AbstractTask> &task) { TaskProxy task_proxy(*task); utils::Timer timer; utils::g_log << "Generating patterns using the hill climbing generator..." << endl; // Generate initial collection: a pattern for each goal variable. 
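    // For instance (illustrative values, not from the original source), a task
    // whose goal mentions variables 2, 5, and 7 starts hill climbing from the
    // collection {{2}, {5}, {7}}.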
PatternCollection initial_pattern_collection; for (FactProxy goal : task_proxy.get_goals()) { int goal_var_id = goal.get_variable().get_id(); initial_pattern_collection.emplace_back(1, goal_var_id); } current_pdbs = utils::make_unique_ptr<IncrementalCanonicalPDBs>( task_proxy, initial_pattern_collection); utils::g_log << "Done calculating initial pattern collection: " << timer << endl; State initial_state = task_proxy.get_initial_state(); if (!current_pdbs->is_dead_end(initial_state) && max_time > 0) { hill_climbing(task_proxy); } PatternCollectionInformation pci = current_pdbs->get_pattern_collection_information(); dump_pattern_collection_generation_statistics( "Hill climbing generator", timer(), pci); return pci; } void add_hillclimbing_options(OptionParser &parser) { parser.add_option<int>( "pdb_max_size", "maximal number of states per pattern database ", "2000000", Bounds("1", "infinity")); parser.add_option<int>( "collection_max_size", "maximal number of states in the pattern collection", "20000000", Bounds("1", "infinity")); parser.add_option<int>( "num_samples", "number of samples (random states) on which to evaluate each " "candidate pattern collection", "1000", Bounds("1", "infinity")); parser.add_option<int>( "min_improvement", "minimum number of samples on which a candidate pattern " "collection must improve on the current one to be considered " "as the next pattern collection ", "10", Bounds("1", "infinity")); parser.add_option<double>( "max_time", "maximum time in seconds for improving the initial pattern " "collection via hill climbing. If set to 0, no hill climbing " "is performed at all. Note that this limit only affects hill " "climbing. Use max_time_dominance_pruning to limit the time " "spent for pruning dominated patterns.", "infinity", Bounds("0.0", "infinity")); utils::add_rng_options(parser); } void check_hillclimbing_options( OptionParser &parser, const Options &opts) { if (opts.get<int>("min_improvement") > opts.get<int>("num_samples")) parser.error("minimum improvement must not be higher than number of " "samples"); } static shared_ptr<PatternCollectionGenerator> _parse(OptionParser &parser) { add_hillclimbing_options(parser); Options opts = parser.parse(); if (parser.help_mode()) return nullptr; check_hillclimbing_options(parser, opts); if (parser.dry_run()) return nullptr; return make_shared<PatternCollectionGeneratorHillclimbing>(opts); } static shared_ptr<Heuristic> _parse_ipdb(OptionParser &parser) { parser.document_synopsis( "iPDB", "This pattern generation method is an adaption of the algorithm " "described in the following paper:" + utils::format_conference_reference( {"Patrik Haslum", "Adi Botea", "Malte Helmert", "Blai Bonet", "Sven Koenig"}, "Domain-Independent Construction of Pattern Database Heuristics for" " Cost-Optimal Planning", "http://www.informatik.uni-freiburg.de/~ki/papers/haslum-etal-aaai07.pdf", "Proceedings of the 22nd AAAI Conference on Artificial" " Intelligence (AAAI 2007)", "1007-1012", "AAAI Press", "2007") + "For implementation notes, see:" + utils::format_conference_reference( {"Silvan Sievers", "Manuela Ortlieb", "Malte Helmert"}, "Efficient Implementation of Pattern Database Heuristics for" " Classical Planning", "https://ai.dmi.unibas.ch/papers/sievers-et-al-socs2012.pdf", "Proceedings of the Fifth Annual Symposium on Combinatorial" " Search (SoCS 2012)", "105-111", "AAAI Press", "2012")); parser.document_note( "Note", "The pattern collection created by the algorithm will always contain " "all patterns consisting of a single goal 
variable, even if this "
        "violates the pdb_max_size or collection_max_size limits.");
    parser.document_language_support("action costs", "supported");
    parser.document_language_support("conditional effects", "not supported");
    parser.document_language_support("axioms", "not supported");
    parser.document_property("admissible", "yes");
    parser.document_property("consistent", "yes");
    parser.document_property("safe", "yes");
    parser.document_property("preferred operators", "no");
    parser.document_note(
        "Note",
        "This pattern generation method uses the canonical pattern collection "
        "heuristic.");
    parser.document_note(
        "Implementation Notes",
        "The following will very briefly describe the algorithm and explain "
        "the differences between the original implementation from 2007 and the "
        "new one in Fast Downward.\n\n"
        "The aim of the algorithm is to output a pattern collection for which "
        "the Evaluator#Canonical_PDB yields the best heuristic estimates.\n\n"
        "The algorithm is basically a local search (hill climbing) which "
        "searches the \"pattern neighbourhood\" (starting initially with a "
        "pattern for each goal variable) for improving the pattern collection. "
        "This is done as described in the section \"pattern construction as "
        "search\" in the paper, except for the corrected search "
        "neighbourhood discussed below. For evaluating the "
        "neighbourhood, the \"counting approximation\" as introduced in the "
        "paper was implemented. An important difference, however, is that this "
        "implementation computes all pattern databases for "
        "each candidate pattern rather than using A* search to compute the "
        "heuristic values only for the sample states for each pattern.\n\n"
        "Also the logic for sampling the search space differs a bit from the "
        "original implementation. The original implementation uses a random "
        "walk of a length which is binomially distributed with the mean at the "
        "estimated solution depth (estimation is done with the current pattern "
        "collection heuristic). The Fast Downward implementation also uses a "
        "random walk, whose length is the estimated number "
        "of solution steps, calculated by dividing the current "
        "heuristic estimate for the initial state by the average operator "
        "costs of the planning task (calculated only once and not updated "
        "during sampling!) to take non-unit cost problems into account. This "
        "yields a random walk with an expected length of np = 2 * estimated "
        "number of solution steps. If the random walk gets stuck, it is "
        "restarted from the initial state, exactly as described in the "
        "original paper.\n\n"
        "The section \"avoiding redundant evaluations\" describes how the "
        "search neighbourhood of patterns can be restricted to variables that "
        "are relevant to the variables already included in the pattern by "
        "analyzing causal graphs. There is a mistake in the paper that leads "
        "to some relevant neighbouring patterns being ignored. See the [errata "
        "https://ai.dmi.unibas.ch/research/publications.html] for details. This "
        "mistake has been addressed in this implementation. 
" "The second approach described in the paper (statistical confidence " "interval) is not applicable to this implementation, as it doesn't use " "A* search but constructs the entire pattern databases for all " "candidate patterns anyway.\n" "The search is ended if there is no more improvement (or the " "improvement is smaller than the minimal improvement which can be set " "as an option), however there is no limit of iterations of the local " "search. This is similar to the techniques used in the original " "implementation as described in the paper.", true); add_hillclimbing_options(parser); /* Add, possibly among others, the options for dominance pruning. Note that using dominance pruning during hill climbing could lead to fewer discovered patterns and pattern collections. A dominated pattern (or pattern collection) might no longer be dominated after more patterns are added. We thus only use dominance pruning on the resulting collection. */ add_canonical_pdbs_options_to_parser(parser); Heuristic::add_options_to_parser(parser); Options opts = parser.parse(); if (parser.help_mode()) return nullptr; check_hillclimbing_options(parser, opts); if (parser.dry_run()) return nullptr; shared_ptr<PatternCollectionGeneratorHillclimbing> pgh = make_shared<PatternCollectionGeneratorHillclimbing>(opts); Options heuristic_opts; heuristic_opts.set<shared_ptr<AbstractTask>>( "transform", opts.get<shared_ptr<AbstractTask>>("transform")); heuristic_opts.set<bool>( "cache_estimates", opts.get<bool>("cache_estimates")); heuristic_opts.set<shared_ptr<PatternCollectionGenerator>>( "patterns", pgh); heuristic_opts.set<double>( "max_time_dominance_pruning", opts.get<double>("max_time_dominance_pruning")); return make_shared<CanonicalPDBsHeuristic>(heuristic_opts); } static Plugin<Evaluator> _plugin_ipdb("ipdb", _parse_ipdb, "heuristics_pdb"); static Plugin<PatternCollectionGenerator> _plugin("hillclimbing", _parse); }
26,383
C++
41.012739
99
0.634803
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/plugin_group.cc
#include "../plugin.h"

namespace pdbs {
static PluginGroupPlugin _plugin(
    "heuristics_pdb",
    "Pattern Database Heuristics");
}
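/*
  Example usage (editorial illustration, not part of the original file): the
  heuristics registered in this plugin group can be selected from the usual
  Fast Downward command line. The ipdb option values shown are the defaults
  documented in pattern_collection_generator_hillclimbing.cc; the driver
  script name, the --search flag and the astar/cpdbs plugin names are assumed
  to follow the standard Fast Downward conventions.

    ./fast-downward.py domain.pddl problem.pddl \
        --search "astar(ipdb(pdb_max_size=2000000, collection_max_size=20000000,
                             num_samples=1000, min_improvement=10))"

    ./fast-downward.py domain.pddl problem.pddl \
        --search "astar(cpdbs(patterns=hillclimbing(max_time=900)))"
*/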
135
C++
15.999998
35
0.696296
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/pattern_collection_generator_systematic.h
#ifndef PDBS_PATTERN_COLLECTION_GENERATOR_SYSTEMATIC_H
#define PDBS_PATTERN_COLLECTION_GENERATOR_SYSTEMATIC_H

#include "pattern_generator.h"
#include "types.h"

#include "../utils/hash.h"

#include <cstdlib>
#include <memory>
#include <unordered_set>
#include <vector>

class TaskProxy;

namespace causal_graph {
class CausalGraph;
}

namespace options {
class Options;
}

namespace pdbs {
class CanonicalPDBsHeuristic;

// Invariant: patterns are always sorted.
class PatternCollectionGeneratorSystematic : public PatternCollectionGenerator {
    using PatternSet = utils::HashSet<Pattern>;

    const size_t max_pattern_size;
    const bool only_interesting_patterns;
    std::shared_ptr<PatternCollection> patterns;
    PatternSet pattern_set;  // Cleared after pattern computation.

    void enqueue_pattern_if_new(const Pattern &pattern);
    void compute_eff_pre_neighbors(const causal_graph::CausalGraph &cg,
                                   const Pattern &pattern,
                                   std::vector<int> &result) const;
    void compute_connection_points(const causal_graph::CausalGraph &cg,
                                   const Pattern &pattern,
                                   std::vector<int> &result) const;

    void build_sga_patterns(const TaskProxy &task_proxy,
                            const causal_graph::CausalGraph &cg);
    void build_patterns(const TaskProxy &task_proxy);
    void build_patterns_naive(const TaskProxy &task_proxy);
public:
    explicit PatternCollectionGeneratorSystematic(const options::Options &opts);

    virtual PatternCollectionInformation generate(
        const std::shared_ptr<AbstractTask> &task) override;
};
}

#endif
1,668
C
28.803571
94
0.697242
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/incremental_canonical_pdbs.cc
#include "incremental_canonical_pdbs.h"

#include "canonical_pdbs.h"
#include "pattern_database.h"

#include <limits>

using namespace std;

namespace pdbs {
IncrementalCanonicalPDBs::IncrementalCanonicalPDBs(
    const TaskProxy &task_proxy, const PatternCollection &initial_patterns)
    : task_proxy(task_proxy),
      patterns(make_shared<PatternCollection>(initial_patterns.begin(),
                                              initial_patterns.end())),
      pattern_databases(make_shared<PDBCollection>()),
      pattern_cliques(nullptr),
      size(0) {
    pattern_databases->reserve(patterns->size());
    for (const Pattern &pattern : *patterns)
        add_pdb_for_pattern(pattern);
    are_additive = compute_additive_vars(task_proxy);
    recompute_pattern_cliques();
}

void IncrementalCanonicalPDBs::add_pdb_for_pattern(const Pattern &pattern) {
    pattern_databases->push_back(
        make_shared<PatternDatabase>(task_proxy, pattern));
    size += pattern_databases->back()->get_size();
}

void IncrementalCanonicalPDBs::add_pdb(const shared_ptr<PatternDatabase> &pdb) {
    patterns->push_back(pdb->get_pattern());
    pattern_databases->push_back(pdb);
    size += pattern_databases->back()->get_size();
    recompute_pattern_cliques();
}

void IncrementalCanonicalPDBs::recompute_pattern_cliques() {
    pattern_cliques = compute_pattern_cliques(*patterns, are_additive);
}

vector<PatternClique> IncrementalCanonicalPDBs::get_pattern_cliques(
    const Pattern &new_pattern) {
    return pdbs::compute_pattern_cliques_with_pattern(
        *patterns, *pattern_cliques, new_pattern, are_additive);
}

int IncrementalCanonicalPDBs::get_value(const State &state) const {
    CanonicalPDBs canonical_pdbs(pattern_databases, pattern_cliques);
    return canonical_pdbs.get_value(state);
}

bool IncrementalCanonicalPDBs::is_dead_end(const State &state) const {
    state.unpack();
    for (const shared_ptr<PatternDatabase> &pdb : *pattern_databases)
        if (pdb->get_value(state.get_unpacked_values()) ==
            numeric_limits<int>::max())
            return true;
    return false;
}

PatternCollectionInformation
IncrementalCanonicalPDBs::get_pattern_collection_information() const {
    PatternCollectionInformation result(task_proxy, patterns);
    result.set_pdbs(pattern_databases);
    result.set_pattern_cliques(pattern_cliques);
    return result;
}
}
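/*
  Intended call sequence (editorial sketch, mirroring how
  PatternCollectionGeneratorHillclimbing uses this class; the variable names
  and pattern contents below are made up for illustration):

    TaskProxy task_proxy(*task);                      // task: shared_ptr<AbstractTask>
    PatternCollection initial_patterns = {{0}, {1}};  // e.g. one goal variable each
    IncrementalCanonicalPDBs pdbs(task_proxy, initial_patterns);

    int h_init = pdbs.get_value(task_proxy.get_initial_state());
    // ... evaluate candidate patterns, then commit the best one:
    pdbs.add_pdb(make_shared<PatternDatabase>(task_proxy, Pattern{0, 2}));

    PatternCollectionInformation info =
        pdbs.get_pattern_collection_information();
*/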
2,419
C++
33.571428
86
0.70153
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pdbs/dominance_pruning.h
#ifndef PDBS_DOMINANCE_PRUNING_H
#define PDBS_DOMINANCE_PRUNING_H

#include "types.h"

namespace pdbs {
/*
  Clique superset dominates clique subset iff for every pattern
  p_subset in subset there is a pattern p_superset in superset
  where p_superset is a superset of p_subset.
*/
extern void prune_dominated_cliques(
    PatternCollection &patterns,
    PDBCollection &pdbs,
    std::vector<PatternClique> &pattern_cliques,
    int num_variables,
    double max_time);
}

#endif
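/*
  Worked example (editorial illustration, not part of the original header):
  for patterns P1 = {0, 1} and P2 = {2, 3}, the clique {P1, P2} dominates the
  clique {{0}, {3}}, because {0} is a subset of P1 and {3} is a subset of P2;
  prune_dominated_cliques may therefore discard the dominated clique.
*/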
482
C
21.999999
68
0.742739