file_path (string, length 20-207) | content (string, length 5-3.85M) | size (int64, 5-3.85M) | lang (string, 9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---|
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/label_equivalence_relation.h
|
#ifndef MERGE_AND_SHRINK_LABEL_EQUIVALENCE_RELATION_H
#define MERGE_AND_SHRINK_LABEL_EQUIVALENCE_RELATION_H
#include "types.h"
#include <list>
#include <unordered_set>
#include <vector>
namespace merge_and_shrink {
class Labels;
using LabelIter = std::list<int>::iterator;
using LabelConstIter = std::list<int>::const_iterator;
class LabelGroup {
/*
A label group contains a set of locally equivalent labels, possibly of
different cost, and stores the minimum cost of all labels of the group.
*/
std::list<int> labels;
int cost;
public:
LabelGroup() : cost(INF) {
}
void set_cost(int cost_) {
cost = cost_;
}
LabelIter insert(int label) {
return labels.insert(labels.end(), label);
}
void erase(LabelIter pos) {
labels.erase(pos);
}
void clear() {
labels.clear();
}
LabelConstIter begin() const {
return labels.begin();
}
LabelConstIter end() const {
return labels.end();
}
bool empty() const {
return labels.empty();
}
int get_cost() const {
return cost;
}
};
class LabelEquivalenceRelation {
/*
This class groups labels together and allows easy access to the group
and position within a group for every label. It is used by the class
TransitionSystem to group locally equivalent labels. Label groups
have implicit IDs defined by their index in grouped_labels.
*/
const Labels &labels;
std::vector<LabelGroup> grouped_labels;
/* Maps each label to its group's ID (index in grouped_labels) and its
iterator within the group. */
std::vector<std::pair<int, LabelIter>> label_to_positions;
void add_label_to_group(int group_id, int label_no);
public:
LabelEquivalenceRelation(
const Labels &labels, const std::vector<std::vector<int>> &label_groups);
/*
NOTE: we need a custom copy constructor here because we need to fill
label_to_positions with correct LabelIter objects that point to the
copied LabelGroup objects rather than to those of the given
LabelEquivalenceRelation other.
*/
LabelEquivalenceRelation(const LabelEquivalenceRelation &other);
/*
The given label mappings (from label reduction) contain the new label
and the old labels that were reduced to the new one.
If affected_group_ids is not given, then all old labels must have been
in the same group before, and the new labels are added to this group.
Otherwise, all old labels are removed from their group(s) and the new
label is added to a new group. Furthermore, the costs of the affected
groups are recomputed.
*/
void apply_label_mapping(
const std::vector<std::pair<int, std::vector<int>>> &label_mapping,
const std::unordered_set<int> *affected_group_ids = nullptr);
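/*
  Illustrative sketch (not part of the original source): assuming a
  relation in which labels 3 and 5 currently share a group, a label
  reduction that replaces them by a new label 7 could be applied as
  follows. The concrete label numbers and the variable name "relation"
  are hypothetical.
  std::vector<std::pair<int, std::vector<int>>> mapping = {{7, {3, 5}}};
  relation.apply_label_mapping(mapping);
  // No affected_group_ids given: all old labels must already be in the
  // same group, and the new label is added to that group.
*/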
// Moves all labels from one group into the other.
void move_group_into_group(int from_group_id, int to_group_id);
int add_label_group(const std::vector<int> &new_labels);
bool is_empty_group(int group_id) const {
return grouped_labels[group_id].empty();
}
int get_group_id(int label_no) const {
return label_to_positions[label_no].first;
}
int get_size() const {
return grouped_labels.size();
}
const LabelGroup &get_group(int group_id) const {
return grouped_labels.at(group_id);
}
};
}
#endif
| 3,474 |
C
| 27.252032 | 81 | 0.661773 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_stateless.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_STATELESS_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_STATELESS_H
#include "merge_strategy_factory.h"
namespace options {
class Options;
}
namespace merge_and_shrink {
class MergeSelector;
class MergeStrategyFactoryStateless : public MergeStrategyFactory {
std::shared_ptr<MergeSelector> merge_selector;
protected:
virtual std::string name() const override;
virtual void dump_strategy_specific_options() const override;
public:
explicit MergeStrategyFactoryStateless(options::Options &options);
virtual ~MergeStrategyFactoryStateless() override = default;
virtual std::unique_ptr<MergeStrategy> compute_merge_strategy(
const TaskProxy &task_proxy,
const FactoredTransitionSystem &fts) override;
virtual bool requires_init_distances() const override;
virtual bool requires_goal_distances() const override;
};
}
#endif
| 923 |
C
| 30.862068 | 70 | 0.772481 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_H
#include "merge_scoring_function.h"
#include <memory>
namespace options {
class Options;
}
namespace merge_and_shrink {
class ShrinkStrategy;
class MergeScoringFunctionMIASM : public MergeScoringFunction {
std::shared_ptr<ShrinkStrategy> shrink_strategy;
const int max_states;
const int max_states_before_merge;
const int shrink_threshold_before_merge;
protected:
virtual std::string name() const override;
public:
explicit MergeScoringFunctionMIASM(const options::Options &options);
virtual ~MergeScoringFunctionMIASM() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return true;
}
virtual bool requires_goal_distances() const override {
return true;
}
};
}
#endif
| 1,043 |
C
| 25.76923 | 75 | 0.732502 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/label_equivalence_relation.cc
|
#include "label_equivalence_relation.h"
#include "labels.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
LabelEquivalenceRelation::LabelEquivalenceRelation(
const Labels &labels, const vector<vector<int>> &label_groups)
: labels(labels) {
/* In the worst case, each label forms a singleton group, and thus with
label reduction, we could have labels.get_max_size() many groups. */
grouped_labels.reserve(labels.get_max_size());
label_to_positions.resize(labels.get_max_size());
for (const vector<int> &label_group : label_groups) {
add_label_group(label_group);
}
}
LabelEquivalenceRelation::LabelEquivalenceRelation(
const LabelEquivalenceRelation &other)
: labels(other.labels) {
// For the reserve call, see the comment in the constructor above.
grouped_labels.reserve(labels.get_max_size());
/*
Note that we do not copy label_to_positions because copying iterators
from potentially uninitialized iterators causes problems in debug mode.
This also means that label_to_positions contains uninitialized values
at all positions corresponding to already reduced labels (inactive
labels).
*/
label_to_positions.resize(other.label_to_positions.size());
for (size_t other_group_id = 0;
other_group_id < other.grouped_labels.size();
++other_group_id) {
// Add a new empty label group.
int group_id = grouped_labels.size();
assert(group_id == static_cast<int>(other_group_id));
grouped_labels.push_back(LabelGroup());
LabelGroup &label_group = grouped_labels.back();
/*
Go over the other label group, add all labels to this group.
To obtain exact copies of the label groups with the same cost, we do
not use add_label_to_group, which would recompute costs based on
given labels and leave cost=infinity for empty groups, but we
manually set the group's cost to match the other group's cost.
*/
const LabelGroup &other_label_group =
other.grouped_labels[other_group_id];
for (int other_label_no : other_label_group) {
LabelIter label_it = label_group.insert(other_label_no);
assert(*label_it == other_label_no);
label_to_positions[other_label_no] = make_pair(group_id, label_it);
}
label_group.set_cost(other_label_group.get_cost());
}
}
void LabelEquivalenceRelation::add_label_to_group(int group_id,
int label_no) {
LabelIter label_it = grouped_labels[group_id].insert(label_no);
label_to_positions[label_no] = make_pair(group_id, label_it);
int label_cost = labels.get_label_cost(label_no);
if (label_cost < grouped_labels[group_id].get_cost())
grouped_labels[group_id].set_cost(label_cost);
}
void LabelEquivalenceRelation::apply_label_mapping(
const vector<pair<int, vector<int>>> &label_mapping,
const unordered_set<int> *affected_group_ids) {
for (const pair<int, vector<int>> &mapping : label_mapping) {
int new_label_no = mapping.first;
const vector<int> &old_label_nos = mapping.second;
// Add new label to group
int canonical_group_id = get_group_id(old_label_nos.front());
if (!affected_group_ids) {
add_label_to_group(canonical_group_id, new_label_no);
} else {
add_label_group({new_label_no});
}
// Remove old labels from group
for (int old_label_no : old_label_nos) {
if (!affected_group_ids) {
assert(canonical_group_id == get_group_id(old_label_no));
}
LabelIter label_it = label_to_positions[old_label_no].second;
grouped_labels[get_group_id(old_label_no)].erase(label_it);
}
}
if (affected_group_ids) {
// Recompute the cost of all affected label groups.
const unordered_set<int> &group_ids = *affected_group_ids;
for (int group_id : group_ids) {
LabelGroup &label_group = grouped_labels[group_id];
// Setting cost to infinity for empty groups does not hurt.
label_group.set_cost(INF);
for (int label_no : label_group) {
int cost = labels.get_label_cost(label_no);
if (cost < label_group.get_cost()) {
label_group.set_cost(cost);
}
}
}
}
}
void LabelEquivalenceRelation::move_group_into_group(
int from_group_id, int to_group_id) {
assert(!is_empty_group(from_group_id));
assert(!is_empty_group(to_group_id));
LabelGroup &from_group = grouped_labels[from_group_id];
for (int label_no : from_group) {
add_label_to_group(to_group_id, label_no);
}
from_group.clear();
}
int LabelEquivalenceRelation::add_label_group(const vector<int> &new_labels) {
int new_group_id = grouped_labels.size();
grouped_labels.push_back(LabelGroup());
for (int label_no : new_labels) {
add_label_to_group(new_group_id, label_no);
}
return new_group_id;
}
}
| 5,185 |
C++
| 37.414815 | 79 | 0.627965 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/utils.cc
|
#include "utils.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "../utils/logging.h"
#include "../utils/math.h"
#include <algorithm>
#include <cassert>
#include <cmath>
using namespace std;
namespace merge_and_shrink {
pair<int, int> compute_shrink_sizes(
int size1,
int size2,
int max_states_before_merge,
int max_states_after_merge) {
// Bound both sizes by max allowed size before merge.
int new_size1 = min(size1, max_states_before_merge);
int new_size2 = min(size2, max_states_before_merge);
if (!utils::is_product_within_limit(
new_size1, new_size2, max_states_after_merge)) {
int balanced_size = int(sqrt(max_states_after_merge));
if (new_size1 <= balanced_size) {
// Size of the first transition system is small enough. Use whatever
// is left for the second transition system.
new_size2 = max_states_after_merge / new_size1;
} else if (new_size2 <= balanced_size) {
// Inverted case as before.
new_size1 = max_states_after_merge / new_size2;
} else {
// Both transition systems are too big. We set both target sizes
// to balanced_size. An alternative would be to set one to
// N1 = balanced_size and the other to N2 = max_states_after_merge /
// balanced_size, to get closer to the allowed maximum.
// However, this would make little difference (N2 would
// always be N1, N1 + 1 or N1 + 2), and our solution has the
// advantage of treating the transition systems symmetrically.
new_size1 = balanced_size;
new_size2 = balanced_size;
}
}
assert(new_size1 <= size1 && new_size2 <= size2);
assert(new_size1 <= max_states_before_merge);
assert(new_size2 <= max_states_before_merge);
assert(new_size1 * new_size2 <= max_states_after_merge);
return make_pair(new_size1, new_size2);
}
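/*
  Worked example (added for illustration; the numbers are hypothetical):
  with size1 = 100, size2 = 2000, max_states_before_merge = 50000 and
  max_states_after_merge = 50000, the product 100 * 2000 = 200000 exceeds
  the limit. balanced_size = int(sqrt(50000)) = 223; since new_size1 = 100
  is already below that, the remaining budget goes to the second factor:
  new_size2 = 50000 / 100 = 500. The function thus returns (100, 500).
*/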
/*
This method checks if the transition system of the factor at index violates
the size limit given via new_size (e.g. as computed by compute_shrink_sizes)
or the threshold shrink_threshold_before_merge that triggers shrinking even
if the size limit is not violated. If so, trigger the shrinking process.
Return true iff the factor was actually shrunk.
*/
bool shrink_factor(
FactoredTransitionSystem &fts,
int index,
int new_size,
int shrink_threshold_before_merge,
const ShrinkStrategy &shrink_strategy,
utils::Verbosity verbosity) {
/*
TODO: think about factoring out common logic of this function and the
function copy_and_shrink_ts in merge_scoring_function_miasm_utils.cc.
*/
const TransitionSystem &ts = fts.get_transition_system(index);
int num_states = ts.get_size();
if (num_states > min(new_size, shrink_threshold_before_merge)) {
if (verbosity >= utils::Verbosity::VERBOSE) {
utils::g_log << ts.tag() << "current size: " << num_states;
if (new_size < num_states)
utils::g_log << " (new size limit: " << new_size;
else
utils::g_log << " (shrink threshold: " << shrink_threshold_before_merge;
utils::g_log << ")" << endl;
}
const Distances &distances = fts.get_distances(index);
StateEquivalenceRelation equivalence_relation =
shrink_strategy.compute_equivalence_relation(ts, distances, new_size);
// TODO: We currently violate this; see issue250
//assert(equivalence_relation.size() <= target_size);
return fts.apply_abstraction(index, equivalence_relation, verbosity);
}
return false;
}
bool shrink_before_merge_step(
FactoredTransitionSystem &fts,
int index1,
int index2,
int max_states,
int max_states_before_merge,
int shrink_threshold_before_merge,
const ShrinkStrategy &shrink_strategy,
utils::Verbosity verbosity) {
/*
Compute the size limit for both transition systems as imposed by
max_states and max_states_before_merge.
*/
pair<int, int> new_sizes = compute_shrink_sizes(
fts.get_transition_system(index1).get_size(),
fts.get_transition_system(index2).get_size(),
max_states_before_merge,
max_states);
/*
For both transition systems, possibly compute and apply an
abstraction.
TODO: we could better use the given limit by increasing the size limit
for the second shrinking if the first shrinking was larger than
required.
*/
bool shrunk1 = shrink_factor(
fts,
index1,
new_sizes.first,
shrink_threshold_before_merge,
shrink_strategy,
verbosity);
if (verbosity >= utils::Verbosity::VERBOSE && shrunk1) {
fts.statistics(index1);
}
bool shrunk2 = shrink_factor(
fts,
index2,
new_sizes.second,
shrink_threshold_before_merge,
shrink_strategy,
verbosity);
if (verbosity >= utils::Verbosity::VERBOSE && shrunk2) {
fts.statistics(index2);
}
return shrunk1 || shrunk2;
}
bool prune_step(
FactoredTransitionSystem &fts,
int index,
bool prune_unreachable_states,
bool prune_irrelevant_states,
utils::Verbosity verbosity) {
assert(prune_unreachable_states || prune_irrelevant_states);
const TransitionSystem &ts = fts.get_transition_system(index);
const Distances &distances = fts.get_distances(index);
int num_states = ts.get_size();
StateEquivalenceRelation state_equivalence_relation;
state_equivalence_relation.reserve(num_states);
int unreachable_count = 0;
int irrelevant_count = 0;
int dead_count = 0;
for (int state = 0; state < num_states; ++state) {
/* If pruning both unreachable and irrelevant states, a state which is
dead is counted for both statistics! */
bool prune_state = false;
if (prune_unreachable_states) {
assert(distances.are_init_distances_computed());
if (distances.get_init_distance(state) == INF) {
++unreachable_count;
prune_state = true;
}
}
if (prune_irrelevant_states) {
assert(distances.are_goal_distances_computed());
if (distances.get_goal_distance(state) == INF) {
++irrelevant_count;
prune_state = true;
}
}
if (prune_state) {
++dead_count;
} else {
StateEquivalenceClass state_equivalence_class;
state_equivalence_class.push_front(state);
state_equivalence_relation.push_back(state_equivalence_class);
}
}
if (verbosity >= utils::Verbosity::VERBOSE &&
(unreachable_count || irrelevant_count)) {
utils::g_log << ts.tag()
<< "unreachable: " << unreachable_count << " states, "
<< "irrelevant: " << irrelevant_count << " states ("
<< "total dead: " << dead_count << " states)" << endl;
}
return fts.apply_abstraction(index, state_equivalence_relation, verbosity);
}
vector<int> compute_abstraction_mapping(
int num_states,
const StateEquivalenceRelation &equivalence_relation) {
vector<int> abstraction_mapping(num_states, PRUNED_STATE);
for (size_t class_no = 0; class_no < equivalence_relation.size(); ++class_no) {
const StateEquivalenceClass &state_equivalence_class =
equivalence_relation[class_no];
for (int state : state_equivalence_class) {
assert(abstraction_mapping[state] == PRUNED_STATE);
abstraction_mapping[state] = class_no;
}
}
return abstraction_mapping;
}
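/*
  Small example (added for illustration): for num_states = 4 and the
  equivalence relation {{0, 2}, {1}} (state 3 belongs to no class, i.e. it
  was pruned), the resulting mapping is [0, 1, 0, PRUNED_STATE]: states 0
  and 2 collapse to abstract state 0, state 1 becomes abstract state 1,
  and state 3 is marked as pruned.
*/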
bool is_goal_relevant(const TransitionSystem &ts) {
int num_states = ts.get_size();
for (int state = 0; state < num_states; ++state) {
if (!ts.is_goal_state(state)) {
return true;
}
}
return false;
}
}
| 8,123 |
C++
| 35.594594 | 88 | 0.623661 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/utils.h
|
#ifndef MERGE_AND_SHRINK_UTILS_H
#define MERGE_AND_SHRINK_UTILS_H
#include "types.h"
#include <memory>
#include <vector>
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
class ShrinkStrategy;
class TransitionSystem;
/*
Compute target sizes for shrinking two transition systems with sizes size1
and size2 before they are merged. Use the following rules:
1) Right before merging, the transition systems may have at most
max_states_before_merge states.
2) Right after merging, the product may have at most max_states_after_merge
states.
3) Transition systems are shrunk as little as necessary to satisfy the above
constraints. (If possible, neither is shrunk at all.)
There is often a Pareto frontier of solutions following these rules. In this
case, balanced solutions (where the target sizes are close to each other)
are preferred over less balanced ones.
*/
extern std::pair<int, int> compute_shrink_sizes(
int size1,
int size2,
int max_states_before_merge,
int max_states_after_merge);
/*
This function first determines if any of the two factors at indices index1
and index2 must be shrunk according to the given size limits max_states and
max_states_before_merge, using the function compute_shrink_sizes (see above).
If not, then the function further checks if any of the two factors has a
size larger than shrink_threshold_before_merge, in which case shrinking is
still triggered.
If shrinking is triggered, apply the abstraction to the two factors
within the factored transition system. Return true iff at least one of the
factors was shrunk.
*/
extern bool shrink_before_merge_step(
FactoredTransitionSystem &fts,
int index1,
int index2,
int max_states,
int max_states_before_merge,
int shrink_threshold_before_merge,
const ShrinkStrategy &shrink_strategy,
utils::Verbosity verbosity);
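/*
  Usage sketch (illustrative only; the concrete limits are hypothetical):
  before merging the factors at index1 and index2, the main merge-and-shrink
  loop calls something along the lines of
  bool shrunk = shrink_before_merge_step(
  fts, index1, index2, 50000, 50000, 1, *shrink_strategy, verbosity);
  and only afterwards performs the actual merge.
*/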
/*
Prune unreachable and/or irrelevant states of the factor at index. This
requires that init and/or goal distances have been computed accordingly.
Return true iff any states have been pruned.
TODO: maybe this functionality belongs to a new class PruneStrategy.
*/
extern bool prune_step(
FactoredTransitionSystem &fts,
int index,
bool prune_unreachable_states,
bool prune_irrelevant_states,
utils::Verbosity verbosity);
/*
Compute the abstraction mapping based on the given state equivalence
relation.
*/
extern std::vector<int> compute_abstraction_mapping(
int num_states,
const StateEquivalenceRelation &equivalence_relation);
extern bool is_goal_relevant(const TransitionSystem &ts);
}
#endif
| 2,687 |
C
| 30.623529 | 79 | 0.756978 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/types.cc
|
#include "types.h"
#include <limits>
using namespace std;
namespace merge_and_shrink {
const int INF = numeric_limits<int>::max();
const int MINUSINF = numeric_limits<int>::min();
const int PRUNED_STATE = -1;
}
| 214 |
C++
| 16.916665 | 48 | 0.705607 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_single_random.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_SINGLE_RANDOM_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_SINGLE_RANDOM_H
#include "merge_scoring_function.h"
#include <memory>
namespace options {
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
class TransitionSystem;
class MergeScoringFunctionSingleRandom : public MergeScoringFunction {
int random_seed; // only for dump options
std::shared_ptr<utils::RandomNumberGenerator> rng;
protected:
virtual std::string name() const override;
virtual void dump_function_specific_options() const override;
public:
explicit MergeScoringFunctionSingleRandom(const options::Options &options);
virtual ~MergeScoringFunctionSingleRandom() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return false;
}
};
}
#endif
| 1,138 |
C
| 26.119047 | 79 | 0.744288 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm.cc
|
#include "merge_scoring_function_miasm.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "merge_and_shrink_algorithm.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "merge_scoring_function_miasm_utils.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
using namespace std;
namespace merge_and_shrink {
MergeScoringFunctionMIASM::MergeScoringFunctionMIASM(
const options::Options &options)
: shrink_strategy(options.get<shared_ptr<ShrinkStrategy>>("shrink_strategy")),
max_states(options.get<int>("max_states")),
max_states_before_merge(options.get<int>("max_states_before_merge")),
shrink_threshold_before_merge(options.get<int>("threshold_before_merge")) {
}
vector<double> MergeScoringFunctionMIASM::compute_scores(
const FactoredTransitionSystem &fts,
const vector<pair<int, int>> &merge_candidates) {
vector<double> scores;
scores.reserve(merge_candidates.size());
for (pair<int, int> merge_candidate : merge_candidates) {
int index1 = merge_candidate.first;
int index2 = merge_candidate.second;
unique_ptr<TransitionSystem> product = shrink_before_merge_externally(
fts,
index1,
index2,
*shrink_strategy,
max_states,
max_states_before_merge,
shrink_threshold_before_merge);
// Compute distances for the product and count the alive states.
unique_ptr<Distances> distances = utils::make_unique_ptr<Distances>(*product);
const bool compute_init_distances = true;
const bool compute_goal_distances = true;
const utils::Verbosity verbosity = utils::Verbosity::SILENT;
distances->compute_distances(compute_init_distances, compute_goal_distances, verbosity);
int num_states = product->get_size();
int alive_states_count = 0;
for (int state = 0; state < num_states; ++state) {
if (distances->get_init_distance(state) != INF &&
distances->get_goal_distance(state) != INF) {
++alive_states_count;
}
}
/*
Compute the score as the ratio of alive states of the product
compared to the number of states of the full product.
*/
assert(num_states);
double score = static_cast<double>(alive_states_count) /
static_cast<double>(num_states);
scores.push_back(score);
}
return scores;
}
string MergeScoringFunctionMIASM::name() const {
return "miasm";
}
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"MIASM",
"This scoring function favors merging transition systems such that in "
"their product, there are many dead states, which can then be pruned "
"without sacrificing information. In particular, the score it assigns "
"to a product is the ratio of alive states to the total number of "
"states. To compute this score, this class thus computes the product "
"of all pairs of transition systems, potentially copying and shrinking "
"the transition systems before if otherwise their product would exceed "
"the specified size limits. A stateless merge strategy using this "
"scoring function is called dyn-MIASM (nowadays also called sbMIASM "
"for score-based MIASM) and is described in the following paper:"
+ utils::format_conference_reference(
{"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
"An Analysis of Merge Strategies for Merge-and-Shrink Heuristics",
"https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf",
"Proceedings of the 26th International Conference on Planning and "
"Scheduling (ICAPS 2016)",
"2358-2366",
"AAAI Press",
"2016"));
parser.document_note(
"Note",
"To obtain the configurations called dyn-MIASM described in the paper, "
"use the following configuration of the merge-and-shrink heuristic "
"and adapt the tie-breaking criteria of {{{total_order}}} as desired:\n "
"{{{\nmerge_and_shrink(merge_strategy=merge_stateless(merge_selector="
"score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy="
"shrink_bisimulation(greedy=false),max_states=50000,"
"threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,"
"product_ts_order=new_to_old,atomic_before_product=true)])),"
"shrink_strategy=shrink_bisimulation(greedy=false),label_reduction="
"exact(before_shrinking=true,before_merging=false),max_states=50000,"
"threshold_before_merge=1)\n}}}");
parser.document_note(
"Note",
"Unless you know what you are doing, we recommend using the same "
"options related to shrinking for {{{sf_miasm}}} as for {{{"
"merge_and_shrink}}}, i.e. the options {{{shrink_strategy}}}, {{{"
"max_states}}}, and {{{threshold_before_merge}}} should be set "
"identically. Furthermore, as this scoring function maximizes the "
"amount of possible pruning, merge-and-shrink should be configured to "
"use full pruning, i.e. {{{prune_unreachable_states=true}}} and {{{"
"prune_irrelevant_states=true}}} (the default).");
// TODO: use shrink strategy and limit options from MergeAndShrinkHeuristic
// instead of having the identical options here again.
parser.add_option<shared_ptr<ShrinkStrategy>>(
"shrink_strategy",
"We recommend setting this to match the shrink strategy configuration "
"given to {{{merge_and_shrink}}}, see note below.");
add_transition_system_size_limit_options_to_parser(parser);
options::Options options = parser.parse();
if (parser.help_mode()) {
return nullptr;
}
handle_shrink_limit_options_defaults(options);
if (parser.dry_run()) {
return nullptr;
} else {
return make_shared<MergeScoringFunctionMIASM>(options);
}
}
static options::Plugin<MergeScoringFunction> _plugin("sf_miasm", _parse);
}
| 6,323 |
C++
| 42.315068 | 96 | 0.659023 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/factored_transition_system.cc
|
#include "factored_transition_system.h"
#include "distances.h"
#include "labels.h"
#include "merge_and_shrink_representation.h"
#include "transition_system.h"
#include "utils.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include "../utils/system.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
FTSConstIterator::FTSConstIterator(
const FactoredTransitionSystem &fts,
bool end)
: fts(fts), current_index((end ? fts.get_size() : 0)) {
next_valid_index();
}
void FTSConstIterator::next_valid_index() {
while (current_index < fts.get_size()
&& !fts.is_active(current_index)) {
++current_index;
}
}
void FTSConstIterator::operator++() {
++current_index;
next_valid_index();
}
FactoredTransitionSystem::FactoredTransitionSystem(
unique_ptr<Labels> labels,
vector<unique_ptr<TransitionSystem>> &&transition_systems,
vector<unique_ptr<MergeAndShrinkRepresentation>> &&mas_representations,
vector<unique_ptr<Distances>> &&distances,
const bool compute_init_distances,
const bool compute_goal_distances,
utils::Verbosity verbosity)
: labels(move(labels)),
transition_systems(move(transition_systems)),
mas_representations(move(mas_representations)),
distances(move(distances)),
compute_init_distances(compute_init_distances),
compute_goal_distances(compute_goal_distances),
num_active_entries(this->transition_systems.size()) {
for (size_t index = 0; index < this->transition_systems.size(); ++index) {
if (compute_init_distances || compute_goal_distances) {
this->distances[index]->compute_distances(
compute_init_distances, compute_goal_distances, verbosity);
}
assert(is_component_valid(index));
}
}
FactoredTransitionSystem::FactoredTransitionSystem(FactoredTransitionSystem &&other)
: labels(move(other.labels)),
transition_systems(move(other.transition_systems)),
mas_representations(move(other.mas_representations)),
distances(move(other.distances)),
compute_init_distances(move(other.compute_init_distances)),
compute_goal_distances(move(other.compute_goal_distances)),
num_active_entries(move(other.num_active_entries)) {
/*
This is just a default move constructor. Unfortunately Visual
Studio does not support "= default" for move construction or
move assignment as of this writing.
*/
}
FactoredTransitionSystem::~FactoredTransitionSystem() {
}
void FactoredTransitionSystem::assert_index_valid(int index) const {
assert(utils::in_bounds(index, transition_systems));
assert(utils::in_bounds(index, mas_representations));
assert(utils::in_bounds(index, distances));
if (!(transition_systems[index] && mas_representations[index] && distances[index]) &&
!(!transition_systems[index] && !mas_representations[index] && !distances[index])) {
cerr << "Factor at index is in an inconsistent state!" << endl;
utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
}
}
bool FactoredTransitionSystem::is_component_valid(int index) const {
assert(is_active(index));
if (compute_init_distances && !distances[index]->are_init_distances_computed()) {
return false;
}
if (compute_goal_distances && !distances[index]->are_goal_distances_computed()) {
return false;
}
return transition_systems[index]->are_transitions_sorted_unique() &&
transition_systems[index]->in_sync_with_label_equivalence_relation();
}
void FactoredTransitionSystem::assert_all_components_valid() const {
for (size_t index = 0; index < transition_systems.size(); ++index) {
if (transition_systems[index]) {
assert(is_component_valid(index));
}
}
}
void FactoredTransitionSystem::apply_label_mapping(
const vector<pair<int, vector<int>>> &label_mapping,
int combinable_index) {
assert_all_components_valid();
for (const auto &new_label_old_labels : label_mapping) {
assert(new_label_old_labels.first == labels->get_size());
labels->reduce_labels(new_label_old_labels.second);
}
for (size_t i = 0; i < transition_systems.size(); ++i) {
if (transition_systems[i]) {
transition_systems[i]->apply_label_reduction(
label_mapping, static_cast<int>(i) != combinable_index);
}
}
assert_all_components_valid();
}
bool FactoredTransitionSystem::apply_abstraction(
int index,
const StateEquivalenceRelation &state_equivalence_relation,
utils::Verbosity verbosity) {
assert(is_component_valid(index));
int new_num_states = state_equivalence_relation.size();
if (new_num_states == transition_systems[index]->get_size()) {
return false;
}
vector<int> abstraction_mapping = compute_abstraction_mapping(
transition_systems[index]->get_size(), state_equivalence_relation);
transition_systems[index]->apply_abstraction(
state_equivalence_relation, abstraction_mapping, verbosity);
if (compute_init_distances || compute_goal_distances) {
distances[index]->apply_abstraction(
state_equivalence_relation,
compute_init_distances,
compute_goal_distances,
verbosity);
}
mas_representations[index]->apply_abstraction_to_lookup_table(
abstraction_mapping);
/* If distances need to be recomputed, this already happened in the
Distances object. */
assert(is_component_valid(index));
return true;
}
int FactoredTransitionSystem::merge(
int index1,
int index2,
utils::Verbosity verbosity) {
assert(is_component_valid(index1));
assert(is_component_valid(index2));
transition_systems.push_back(
TransitionSystem::merge(
*labels,
*transition_systems[index1],
*transition_systems[index2],
verbosity));
distances[index1] = nullptr;
distances[index2] = nullptr;
transition_systems[index1] = nullptr;
transition_systems[index2] = nullptr;
mas_representations.push_back(
utils::make_unique_ptr<MergeAndShrinkRepresentationMerge>(
move(mas_representations[index1]),
move(mas_representations[index2])));
mas_representations[index1] = nullptr;
mas_representations[index2] = nullptr;
const TransitionSystem &new_ts = *transition_systems.back();
distances.push_back(utils::make_unique_ptr<Distances>(new_ts));
int new_index = transition_systems.size() - 1;
// Restore the invariant that distances are computed.
if (compute_init_distances || compute_goal_distances) {
distances[new_index]->compute_distances(
compute_init_distances, compute_goal_distances, verbosity);
}
--num_active_entries;
assert(is_component_valid(new_index));
return new_index;
}
pair<unique_ptr<MergeAndShrinkRepresentation>, unique_ptr<Distances>>
FactoredTransitionSystem::extract_factor(int index) {
assert(is_component_valid(index));
return make_pair(move(mas_representations[index]),
move(distances[index]));
}
void FactoredTransitionSystem::statistics(int index) const {
assert(is_component_valid(index));
const TransitionSystem &ts = *transition_systems[index];
ts.statistics();
const Distances &dist = *distances[index];
dist.statistics();
}
void FactoredTransitionSystem::dump(int index) const {
assert_index_valid(index);
transition_systems[index]->dump_labels_and_transitions();
mas_representations[index]->dump();
}
void FactoredTransitionSystem::dump() const {
for (int index : *this) {
dump(index);
}
}
bool FactoredTransitionSystem::is_factor_solvable(int index) const {
assert(is_component_valid(index));
return transition_systems[index]->is_solvable(*distances[index]);
}
bool FactoredTransitionSystem::is_factor_trivial(int index) const {
assert(is_component_valid(index));
if (!mas_representations[index]->is_total()) {
return false;
}
const TransitionSystem &ts = *transition_systems[index];
for (int state = 0; state < ts.get_size(); ++state) {
if (!ts.is_goal_state(state)) {
return false;
}
}
return true;
}
bool FactoredTransitionSystem::is_active(int index) const {
assert_index_valid(index);
return transition_systems[index] != nullptr;
}
}
| 8,533 |
C++
| 33.550607 | 92 | 0.677019 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_UTILS_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_UTILS_H
#include <memory>
namespace merge_and_shrink {
class FactoredTransitionSystem;
class ShrinkStrategy;
class TransitionSystem;
/*
Copy the two transition systems at the given indices, possibly shrink them
according to the same rules as merge-and-shrink does, and return their
product.
*/
extern std::unique_ptr<TransitionSystem> shrink_before_merge_externally(
const FactoredTransitionSystem &fts,
int index1,
int index2,
const ShrinkStrategy &shrink_strategy,
int max_states,
int max_states_before_merge,
int shrink_threshold_before_merge);
}
#endif
| 711 |
C
| 25.370369 | 76 | 0.767932 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_GOAL_RELEVANCE_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_GOAL_RELEVANCE_H
#include "merge_scoring_function.h"
namespace merge_and_shrink {
class MergeScoringFunctionGoalRelevance : public MergeScoringFunction {
protected:
virtual std::string name() const override;
public:
MergeScoringFunctionGoalRelevance() = default;
virtual ~MergeScoringFunctionGoalRelevance() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return false;
}
};
}
#endif
| 815 |
C
| 28.142856 | 75 | 0.736196 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_total_order.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_TOTAL_ORDER_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_TOTAL_ORDER_H
#include "merge_scoring_function.h"
#include <memory>
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
class MergeScoringFunctionTotalOrder : public MergeScoringFunction {
enum class AtomicTSOrder {
REVERSE_LEVEL,
LEVEL,
RANDOM
};
AtomicTSOrder atomic_ts_order;
enum class ProductTSOrder {
OLD_TO_NEW,
NEW_TO_OLD,
RANDOM
};
ProductTSOrder product_ts_order;
bool atomic_before_product;
int random_seed; // only for dump options
std::shared_ptr<utils::RandomNumberGenerator> rng;
std::vector<std::pair<int, int>> merge_candidate_order;
protected:
virtual std::string name() const override;
virtual void dump_function_specific_options() const override;
public:
explicit MergeScoringFunctionTotalOrder(const options::Options &options);
virtual ~MergeScoringFunctionTotalOrder() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual void initialize(const TaskProxy &task_proxy) override;
static void add_options_to_parser(options::OptionParser &parser);
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return false;
}
};
}
#endif
| 1,610 |
C
| 26.775862 | 77 | 0.711801 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_algorithm.h
|
#ifndef MERGE_AND_SHRINK_MERGE_AND_SHRINK_ALGORITHM_H
#define MERGE_AND_SHRINK_MERGE_AND_SHRINK_ALGORITHM_H
#include <memory>
class TaskProxy;
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class CountdownTimer;
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
class LabelReduction;
class MergeStrategyFactory;
class ShrinkStrategy;
class MergeAndShrinkAlgorithm {
// TODO: when the option parser supports it, the following should become
// unique pointers.
std::shared_ptr<MergeStrategyFactory> merge_strategy_factory;
std::shared_ptr<ShrinkStrategy> shrink_strategy;
std::shared_ptr<LabelReduction> label_reduction;
// Options for shrinking
// Hard limit: the maximum size of a transition system at any point.
const int max_states;
// Hard limit: the maximum size of a transition system before being merged.
const int max_states_before_merge;
/* A soft limit for triggering shrinking even if the hard limits
max_states and max_states_before_merge are not violated. */
const int shrink_threshold_before_merge;
// Options for pruning
const bool prune_unreachable_states;
const bool prune_irrelevant_states;
const utils::Verbosity verbosity;
const double main_loop_max_time;
long starting_peak_memory;
void report_peak_memory_delta(bool final = false) const;
void dump_options() const;
void warn_on_unusual_options() const;
bool ran_out_of_time(const utils::CountdownTimer &timer) const;
void statistics(int maximum_intermediate_size) const;
void main_loop(
FactoredTransitionSystem &fts,
const TaskProxy &task_proxy);
public:
explicit MergeAndShrinkAlgorithm(const options::Options &opts);
FactoredTransitionSystem build_factored_transition_system(const TaskProxy &task_proxy);
};
extern void add_merge_and_shrink_algorithm_options_to_parser(options::OptionParser &parser);
extern void add_transition_system_size_limit_options_to_parser(options::OptionParser &parser);
extern void handle_shrink_limit_options_defaults(options::Options &opts);
}
#endif
| 2,156 |
C
| 30.720588 | 94 | 0.752319 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/fts_factory.h
|
#ifndef MERGE_AND_SHRINK_FTS_FACTORY_H
#define MERGE_AND_SHRINK_FTS_FACTORY_H
/*
Factory for factored transition systems.
Takes a planning task and produces a factored transition system that
represents the planning task. This provides the main bridge from
planning tasks to the concepts on which merge-and-shrink abstractions
are based (transition systems, labels, etc.). The "internal" classes of
merge-and-shrink should not need to know about planning task concepts.
*/
class TaskProxy;
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
extern FactoredTransitionSystem create_factored_transition_system(
const TaskProxy &task_proxy,
bool compute_init_distances,
bool compute_goal_distances,
utils::Verbosity verbosity);
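/*
  Usage sketch (illustrative only; not part of the original header): a
  caller that wants both init and goal distances could write
  FactoredTransitionSystem fts = create_factored_transition_system(
  task_proxy, true, true, utils::Verbosity::VERBOSE);
  where task_proxy is an existing TaskProxy for the planning task.
*/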
}
#endif
| 819 |
C
| 25.451612 | 73 | 0.777778 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_representation.h
|
#ifndef MERGE_AND_SHRINK_MERGE_AND_SHRINK_REPRESENTATION_H
#define MERGE_AND_SHRINK_MERGE_AND_SHRINK_REPRESENTATION_H
#include <memory>
#include <vector>
class State;
namespace merge_and_shrink {
class Distances;
class MergeAndShrinkRepresentation {
protected:
int domain_size;
public:
explicit MergeAndShrinkRepresentation(int domain_size);
virtual ~MergeAndShrinkRepresentation() = 0;
int get_domain_size() const;
// Store distances instead of abstract state numbers.
virtual void set_distances(const Distances &) = 0;
virtual void apply_abstraction_to_lookup_table(
const std::vector<int> &abstraction_mapping) = 0;
/*
Return the value that state is mapped to. This is either an abstract
state (if set_distances has not been called) or a distance (if it has).
If the represented function is not total, the returned value is DEAD_END
if the abstract state is PRUNED_STATE or if the (distance) value is INF.
*/
virtual int get_value(const State &state) const = 0;
/* Return true iff the represented function is total, i.e., does not map
to PRUNED_STATE. */
virtual bool is_total() const = 0;
virtual void dump() const = 0;
};
class MergeAndShrinkRepresentationLeaf : public MergeAndShrinkRepresentation {
const int var_id;
std::vector<int> lookup_table;
public:
MergeAndShrinkRepresentationLeaf(int var_id, int domain_size);
virtual ~MergeAndShrinkRepresentationLeaf() = default;
virtual void set_distances(const Distances &) override;
virtual void apply_abstraction_to_lookup_table(
const std::vector<int> &abstraction_mapping) override;
virtual int get_value(const State &state) const override;
virtual bool is_total() const override;
virtual void dump() const override;
};
class MergeAndShrinkRepresentationMerge : public MergeAndShrinkRepresentation {
std::unique_ptr<MergeAndShrinkRepresentation> left_child;
std::unique_ptr<MergeAndShrinkRepresentation> right_child;
std::vector<std::vector<int>> lookup_table;
public:
MergeAndShrinkRepresentationMerge(
std::unique_ptr<MergeAndShrinkRepresentation> left_child,
std::unique_ptr<MergeAndShrinkRepresentation> right_child);
virtual ~MergeAndShrinkRepresentationMerge() = default;
virtual void set_distances(const Distances &distances) override;
virtual void apply_abstraction_to_lookup_table(
const std::vector<int> &abstraction_mapping) override;
virtual int get_value(const State &state) const override;
virtual bool is_total() const override;
virtual void dump() const override;
};
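/*
  Note added for illustration (an assumption about the implementation, not
  stated in this header): a merge node presumably composes its two children
  via the two-dimensional lookup table, i.e. conceptually
  value = lookup_table[left_child->get_value(state)][right_child->get_value(state)];
  with the usual special-casing of PRUNED_STATE / dead ends as described
  for get_value above.
*/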
}
#endif
| 2,662 |
C
| 34.039473 | 79 | 0.732156 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_strategy.h
|
#ifndef MERGE_AND_SHRINK_SHRINK_STRATEGY_H
#define MERGE_AND_SHRINK_SHRINK_STRATEGY_H
#include "types.h"
#include <string>
#include <vector>
namespace merge_and_shrink {
class Distances;
class TransitionSystem;
class ShrinkStrategy {
protected:
virtual std::string name() const = 0;
virtual void dump_strategy_specific_options() const = 0;
public:
ShrinkStrategy() = default;
virtual ~ShrinkStrategy() = default;
/*
Compute a state equivalence relation over the states of the given
transition system such that its new number of states after abstracting
it according to this equivalence relation is at most target_size
(currently violated; see issue250). distances must be the distances
information associated with the given transition system.
Note that if target_size equals the current size of the transition system,
the shrink strategy is not required to compute an equivalence relation
that results in actually shrinking the size of the transition system.
However, it may attempt to e.g. compute an equivalence relation that
results in shrinking the transition system in an information-preserving
way.
*/
virtual StateEquivalenceRelation compute_equivalence_relation(
const TransitionSystem &ts,
const Distances &distances,
int target_size) const = 0;
virtual bool requires_init_distances() const = 0;
virtual bool requires_goal_distances() const = 0;
void dump_options() const;
std::string get_name() const;
};
}
#endif
| 1,561 |
C
| 31.541666 | 80 | 0.724536 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_sccs.cc
|
#include "merge_strategy_factory_sccs.h"
#include "merge_strategy_sccs.h"
#include "merge_selector.h"
#include "merge_tree_factory.h"
#include "transition_system.h"
#include "../task_proxy.h"
#include "../algorithms/sccs.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../task_utils/causal_graph.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/system.h"
#include <algorithm>
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
bool compare_sccs_increasing(const vector<int> &lhs, const vector<int> &rhs) {
return lhs.size() < rhs.size();
}
bool compare_sccs_decreasing(const vector<int> &lhs, const vector<int> &rhs) {
return lhs.size() > rhs.size();
}
MergeStrategyFactorySCCs::MergeStrategyFactorySCCs(const options::Options &options)
: order_of_sccs(options.get<OrderOfSCCs>("order_of_sccs")),
merge_tree_factory(nullptr),
merge_selector(nullptr) {
if (options.contains("merge_tree")) {
merge_tree_factory = options.get<shared_ptr<MergeTreeFactory>>("merge_tree");
}
if (options.contains("merge_selector")) {
merge_selector = options.get<shared_ptr<MergeSelector>>("merge_selector");
}
}
unique_ptr<MergeStrategy> MergeStrategyFactorySCCs::compute_merge_strategy(
const TaskProxy &task_proxy,
const FactoredTransitionSystem &fts) {
VariablesProxy vars = task_proxy.get_variables();
int num_vars = vars.size();
// Compute SCCs of the causal graph.
vector<vector<int>> cg;
cg.reserve(num_vars);
for (VariableProxy var : vars) {
const vector<int> &successors =
task_proxy.get_causal_graph().get_successors(var.get_id());
cg.push_back(successors);
}
vector<vector<int>> sccs(sccs::compute_maximal_sccs(cg));
// Put the SCCs in the desired order.
switch (order_of_sccs) {
case OrderOfSCCs::TOPOLOGICAL:
// SCCs are computed in topological order.
break;
case OrderOfSCCs::REVERSE_TOPOLOGICAL:
// SCCs are computed in topological order.
reverse(sccs.begin(), sccs.end());
break;
case OrderOfSCCs::DECREASING:
sort(sccs.begin(), sccs.end(), compare_sccs_decreasing);
break;
case OrderOfSCCs::INCREASING:
sort(sccs.begin(), sccs.end(), compare_sccs_increasing);
break;
}
/*
Compute the indices at which the merged SCCs can be found when all
SCCs have been merged.
*/
int index = num_vars - 1;
utils::g_log << "SCCs of the causal graph:" << endl;
vector<vector<int>> non_singleton_cg_sccs;
vector<int> indices_of_merged_sccs;
indices_of_merged_sccs.reserve(sccs.size());
for (const vector<int> &scc : sccs) {
utils::g_log << scc << endl;
int scc_size = scc.size();
if (scc_size == 1) {
indices_of_merged_sccs.push_back(scc.front());
} else {
index += scc_size - 1;
indices_of_merged_sccs.push_back(index);
non_singleton_cg_sccs.push_back(scc);
}
}
if (sccs.size() == 1) {
utils::g_log << "Only one single SCC" << endl;
}
if (static_cast<int>(sccs.size()) == num_vars) {
utils::g_log << "Only singleton SCCs" << endl;
assert(non_singleton_cg_sccs.empty());
}
if (merge_selector) {
merge_selector->initialize(task_proxy);
}
return utils::make_unique_ptr<MergeStrategySCCs>(
fts,
task_proxy,
merge_tree_factory,
merge_selector,
move(non_singleton_cg_sccs),
move(indices_of_merged_sccs));
}
bool MergeStrategyFactorySCCs::requires_init_distances() const {
if (merge_tree_factory) {
return merge_tree_factory->requires_init_distances();
} else {
return merge_selector->requires_init_distances();
}
}
bool MergeStrategyFactorySCCs::requires_goal_distances() const {
if (merge_tree_factory) {
return merge_tree_factory->requires_goal_distances();
} else {
return merge_selector->requires_goal_distances();
}
}
void MergeStrategyFactorySCCs::dump_strategy_specific_options() const {
utils::g_log << "Merge order of sccs: ";
switch (order_of_sccs) {
case OrderOfSCCs::TOPOLOGICAL:
utils::g_log << "topological";
break;
case OrderOfSCCs::REVERSE_TOPOLOGICAL:
utils::g_log << "reverse topological";
break;
case OrderOfSCCs::DECREASING:
utils::g_log << "decreasing";
break;
case OrderOfSCCs::INCREASING:
utils::g_log << "increasing";
break;
}
utils::g_log << endl;
utils::g_log << "Merge strategy for merging within sccs: " << endl;
if (merge_tree_factory) {
merge_tree_factory->dump_options();
}
if (merge_selector) {
merge_selector->dump_options();
}
}
string MergeStrategyFactorySCCs::name() const {
return "sccs";
}
static shared_ptr<MergeStrategyFactory> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"Merge strategy SSCs",
"This merge strategy implements the algorithm described in the paper "
+ utils::format_conference_reference(
{"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
"An Analysis of Merge Strategies for Merge-and-Shrink Heuristics",
"https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf",
"Proceedings of the 26th International Conference on Planning and "
"Scheduling (ICAPS 2016)",
"2358-2366",
"AAAI Press",
"2016") +
"In a nutshell, it computes the maximal SCCs of the causal graph, "
"obtaining a partitioning of the task's variables. Every such "
"partition is then merged individually, using the specified fallback "
"merge strategy, considering the SCCs in a configurable order. "
"Afterwards, all resulting composite abstractions are merged to form "
"the final abstraction, again using the specified fallback merge "
"strategy and the configurable order of the SCCs.");
vector<string> order_of_sccs;
order_of_sccs.push_back("topological");
order_of_sccs.push_back("reverse_topological");
order_of_sccs.push_back("decreasing");
order_of_sccs.push_back("increasing");
parser.add_enum_option<OrderOfSCCs>(
"order_of_sccs",
order_of_sccs,
"choose an ordering of the SCCs: topological/reverse_topological or "
"decreasing/increasing in the size of the SCCs. The former two options "
"refer to the directed graph where each obtained SCC is a "
"'supervertex'. For the latter two options, the tie-breaking is to "
"use the topological order according to that same graph of SCC "
"supervertices.",
"topological");
parser.add_option<shared_ptr<MergeTreeFactory>>(
"merge_tree",
"the fallback merge strategy to use if a precomputed strategy should "
"be used.",
options::OptionParser::NONE);
parser.add_option<shared_ptr<MergeSelector>>(
"merge_selector",
"the fallback merge strategy to use if a stateless strategy should "
"be used.",
options::OptionParser::NONE);
options::Options options = parser.parse();
if (parser.help_mode()) {
return nullptr;
} else if (parser.dry_run()) {
bool merge_tree = options.contains("merge_tree");
bool merge_selector = options.contains("merge_selector");
if ((merge_tree && merge_selector) || (!merge_tree && !merge_selector)) {
cerr << "You have to specify exactly one of the options merge_tree "
"and merge_selector!" << endl;
utils::exit_with(utils::ExitCode::SEARCH_INPUT_ERROR);
}
return nullptr;
} else {
return make_shared<MergeStrategyFactorySCCs>(options);
}
}
static options::Plugin<MergeStrategyFactory> _plugin("merge_sccs", _parse);
}
| 8,099 |
C++
| 33.913793 | 85 | 0.63514 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_random.cc
|
#include "shrink_random.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "../option_parser.h"
#include "../plugin.h"
#include <cassert>
#include <memory>
using namespace std;
namespace merge_and_shrink {
ShrinkRandom::ShrinkRandom(const Options &opts)
: ShrinkBucketBased(opts) {
}
vector<ShrinkBucketBased::Bucket> ShrinkRandom::partition_into_buckets(
const TransitionSystem &ts,
const Distances &) const {
vector<Bucket> buckets;
buckets.resize(1);
Bucket &big_bucket = buckets.back();
big_bucket.reserve(ts.get_size());
int num_states = ts.get_size();
for (int state = 0; state < num_states; ++state)
big_bucket.push_back(state);
assert(!big_bucket.empty());
return buckets;
}
string ShrinkRandom::name() const {
return "random";
}
static shared_ptr<ShrinkStrategy> _parse(OptionParser &parser) {
parser.document_synopsis("Random", "");
ShrinkBucketBased::add_options_to_parser(parser);
Options opts = parser.parse();
if (parser.help_mode())
return nullptr;
if (parser.dry_run())
return nullptr;
else
return make_shared<ShrinkRandom>(opts);
}
static Plugin<ShrinkStrategy> _plugin("shrink_random", _parse);
}
| 1,260 |
C++
| 23.25 | 71 | 0.678571 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_sccs.cc
|
#include "merge_strategy_sccs.h"
#include "factored_transition_system.h"
#include "merge_selector.h"
#include "merge_tree.h"
#include "merge_tree_factory.h"
#include "transition_system.h"
#include <algorithm>
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
MergeStrategySCCs::MergeStrategySCCs(
const FactoredTransitionSystem &fts,
const TaskProxy &task_proxy,
const shared_ptr<MergeTreeFactory> &merge_tree_factory,
const shared_ptr<MergeSelector> &merge_selector,
vector<vector<int>> non_singleton_cg_sccs,
vector<int> indices_of_merged_sccs)
: MergeStrategy(fts),
task_proxy(task_proxy),
merge_tree_factory(merge_tree_factory),
merge_selector(merge_selector),
non_singleton_cg_sccs(move(non_singleton_cg_sccs)),
indices_of_merged_sccs(move(indices_of_merged_sccs)),
current_merge_tree(nullptr) {
}
MergeStrategySCCs::~MergeStrategySCCs() {
}
pair<int, int> MergeStrategySCCs::get_next() {
// We are not currently in the middle of merging the factors of an SCC (or
// of all merged SCCs), so we do not have a current set of indices we want
// to finish merging.
if (current_ts_indices.empty()) {
// Get the next indices we need to merge
if (non_singleton_cg_sccs.empty()) {
assert(indices_of_merged_sccs.size() > 1);
current_ts_indices = move(indices_of_merged_sccs);
} else {
vector<int> &current_scc = non_singleton_cg_sccs.front();
assert(current_scc.size() > 1);
current_ts_indices = move(current_scc);
non_singleton_cg_sccs.erase(non_singleton_cg_sccs.begin());
}
// If using a merge tree factory, compute a merge tree for this set
if (merge_tree_factory) {
current_merge_tree = merge_tree_factory->compute_merge_tree(
task_proxy, fts, current_ts_indices);
}
} else {
// Add the most recent merge to the current indices set
current_ts_indices.push_back(fts.get_size() - 1);
}
// Select the next merge for the current set of indices, either using the
// tree or the selector.
pair<int, int > next_pair;
int merged_ts_index = fts.get_size();
if (current_merge_tree) {
assert(!current_merge_tree->done());
next_pair = current_merge_tree->get_next_merge(merged_ts_index);
if (current_merge_tree->done()) {
current_merge_tree = nullptr;
}
} else {
assert(merge_selector);
next_pair = merge_selector->select_merge(fts, current_ts_indices);
}
// Remove the two merged indices from the current set of indices.
for (vector<int>::iterator it = current_ts_indices.begin();
it != current_ts_indices.end();) {
if (*it == next_pair.first || *it == next_pair.second) {
it = current_ts_indices.erase(it);
} else {
++it;
}
}
return next_pair;
}
}
| 2,967 |
C++
| 33.114942 | 77 | 0.630266 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/types.h
|
#ifndef MERGE_AND_SHRINK_TYPES_H
#define MERGE_AND_SHRINK_TYPES_H
#include <forward_list>
#include <list>
#include <vector>
namespace merge_and_shrink {
// Positive infinity. The name "INFINITY" is taken by an ISO C99 macro.
extern const int INF;
extern const int MINUSINF;
extern const int PRUNED_STATE;
/*
An equivalence class is a set of abstract states that shall be
mapped (shrunk) to the same abstract state.
An equivalence relation is a partitioning of states into
equivalence classes. It may omit certain states entirely; these
will be dropped completely and receive an h value of infinity.
*/
using StateEquivalenceClass = std::forward_list<int>;
using StateEquivalenceRelation = std::vector<StateEquivalenceClass>;
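/*
  Example (added for illustration): over a factor with four states, the
  relation {{0, 1}, {3}} maps states 0 and 1 to one abstract state and
  state 3 to another, while state 2 is dropped entirely and thus receives
  an h value of infinity. Such a relation could be built as follows:
  StateEquivalenceRelation relation;
  relation.push_back(StateEquivalenceClass{0, 1});
  relation.push_back(StateEquivalenceClass{3});
*/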
}
#endif
| 749 |
C
| 26.777777 | 71 | 0.771696 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree_factory.cc
|
#include "merge_tree_factory.h"
#include "merge_tree.h"
#include "../options/option_parser.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/rng_options.h"
#include "../utils/system.h"
#include <iostream>
using namespace std;
namespace merge_and_shrink {
MergeTreeFactory::MergeTreeFactory(const options::Options &options)
: rng(utils::parse_rng_from_options(options)),
update_option(options.get<UpdateOption>("update_option")) {
}
void MergeTreeFactory::dump_options() const {
utils::g_log << "Merge tree options: " << endl;
utils::g_log << "Type: " << name() << endl;
utils::g_log << "Update option: ";
switch (update_option) {
case UpdateOption::USE_FIRST:
utils::g_log << "use first";
break;
case UpdateOption::USE_SECOND:
utils::g_log << "use second";
break;
case UpdateOption::USE_RANDOM:
utils::g_log << "use random";
break;
}
utils::g_log << endl;
dump_tree_specific_options();
}
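/*
  Default implementation for factories that can only compute a merge tree
  for the entire task: requesting a tree for a subset of indices is
  treated as an error. Factories that support subsets override this.
*/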
unique_ptr<MergeTree> MergeTreeFactory::compute_merge_tree(
const TaskProxy &,
const FactoredTransitionSystem &,
const vector<int> &) {
cerr << "This merge tree does not support being computed on a subset "
"of indices for a given factored transition system!" << endl;
utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
}
void MergeTreeFactory::add_options_to_parser(options::OptionParser &parser) {
utils::add_rng_options(parser);
vector<string> update_option;
update_option.push_back("use_first");
update_option.push_back("use_second");
update_option.push_back("use_random");
parser.add_enum_option<UpdateOption>(
"update_option",
update_option,
"When the merge tree is used within another merge strategy, how "
"should it be updated when a merge different to a merge from the "
"tree is performed: choose among use_first, use_second, and "
"use_random to choose which node of the tree should survive and "
"represent the new merged index. Specify use_first (use_second) to "
"let the node represententing the index that would have been merged "
"earlier (later) survive. use_random chooses a random node.",
"use_random");
}
static options::PluginTypePlugin<MergeTreeFactory> _type_plugin(
"MergeTree",
"This page describes the available merge trees that can be used to "
"precompute a merge strategy, either for the entire task or a given "
"subset of transition systems of a given factored transition system.\n"
"Merge trees are typically used in the merge strategy of type "
"'precomputed', but they can also be used as fallback merge strategies in "
"'combined' merge strategies.");
}
| 2,787 |
C++
| 34.743589 | 79 | 0.672049 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_single_random.cc
|
#include "merge_scoring_function_single_random.h"
#include "types.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
MergeScoringFunctionSingleRandom::MergeScoringFunctionSingleRandom(
const options::Options &options)
: random_seed(options.get<int>("random_seed")),
rng(utils::parse_rng_from_options(options)) {
}
vector<double> MergeScoringFunctionSingleRandom::compute_scores(
const FactoredTransitionSystem &,
const vector<pair<int, int>> &merge_candidates) {
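    // Draw one candidate uniformly at random; (*rng)(n) returns an int in [0, n).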
int chosen_index = (*rng)(merge_candidates.size());
vector<double> scores;
scores.reserve(merge_candidates.size());
for (size_t candidate_index = 0; candidate_index < merge_candidates.size();
++candidate_index) {
if (static_cast<int>(candidate_index) == chosen_index) {
scores.push_back(0);
} else {
scores.push_back(INF);
}
}
return scores;
}
string MergeScoringFunctionSingleRandom::name() const {
return "single random";
}
void MergeScoringFunctionSingleRandom::dump_function_specific_options() const {
utils::g_log << "Random seed: " << random_seed << endl;
}
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"Single random",
"This scoring function assigns exactly one merge candidate a score of "
"0, chosen randomly, and infinity to all others.");
utils::add_rng_options(parser);
options::Options options = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<MergeScoringFunctionSingleRandom>(options);
}
static options::Plugin<MergeScoringFunction> _plugin("single_random", _parse);
}
| 1,932 |
C++
| 28.738461 | 79 | 0.684783 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bucket_based.cc
|
#include "shrink_bucket_based.h"
#include "../utils/logging.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
#include <iostream>
#include <vector>
using namespace std;
namespace merge_and_shrink {
ShrinkBucketBased::ShrinkBucketBased(const options::Options &opts)
: rng(utils::parse_rng_from_options(opts)) {
}
void ShrinkBucketBased::add_options_to_parser(options::OptionParser &parser) {
utils::add_rng_options(parser);
}
StateEquivalenceRelation ShrinkBucketBased::compute_abstraction(
const vector<Bucket> &buckets, int target_size) const {
bool show_combine_buckets_warning = true;
StateEquivalenceRelation equiv_relation;
equiv_relation.reserve(target_size);
    int num_states_to_go = 0;
for (size_t bucket_no = 0; bucket_no < buckets.size(); ++bucket_no)
num_states_to_go += buckets[bucket_no].size();
for (size_t bucket_no = 0; bucket_no < buckets.size(); ++bucket_no) {
const vector<int> &bucket = buckets[bucket_no];
int states_used_up = static_cast<int>(equiv_relation.size());
int remaining_state_budget = target_size - states_used_up;
num_states_to_go -= bucket.size();
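        /* Number of groups we may still create for this bucket while
           leaving enough budget for every state in later buckets to get
           its own singleton group. */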
int budget_for_this_bucket = remaining_state_budget - num_states_to_go;
if (budget_for_this_bucket >= static_cast<int>(bucket.size())) {
// Each state in bucket can become a singleton group.
for (size_t i = 0; i < bucket.size(); ++i) {
StateEquivalenceClass group;
group.push_front(bucket[i]);
equiv_relation.push_back(group);
}
} else if (budget_for_this_bucket <= 1) {
// The whole bucket must form one group.
int remaining_buckets = buckets.size() - bucket_no;
if (remaining_state_budget >= remaining_buckets) {
equiv_relation.push_back(StateEquivalenceClass());
} else {
if (bucket_no == 0)
equiv_relation.push_back(StateEquivalenceClass());
if (show_combine_buckets_warning) {
show_combine_buckets_warning = false;
utils::g_log << "Very small node limit, must combine buckets."
<< endl;
}
}
StateEquivalenceClass &group = equiv_relation.back();
group.insert_after(group.before_begin(), bucket.begin(), bucket.end());
} else {
// Complicated case: must combine until bucket budget is met.
// First create singleton groups.
vector<StateEquivalenceClass> groups(bucket.size());
for (size_t i = 0; i < bucket.size(); ++i)
groups[i].push_front(bucket[i]);
// Then combine groups until required size is reached.
assert(budget_for_this_bucket >= 2 &&
budget_for_this_bucket < static_cast<int>(groups.size()));
while (static_cast<int>(groups.size()) > budget_for_this_bucket) {
auto it1 = rng->choose(groups);
auto it2 = it1;
while (it1 == it2) {
it2 = rng->choose(groups);
}
it1->splice_after(it1->before_begin(), *it2);
swap(*it2, groups.back());
assert(groups.back().empty());
groups.pop_back();
}
// Finally add these groups to the result.
for (size_t i = 0; i < groups.size(); ++i) {
equiv_relation.push_back(StateEquivalenceClass());
equiv_relation.back().swap(groups[i]);
}
}
}
return equiv_relation;
}
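/*
  Subclasses only define partition_into_buckets() (e.g., bucketing states
  by their distances); compute_abstraction() above then distributes the
  abstract-state budget over those buckets in order.
*/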
StateEquivalenceRelation ShrinkBucketBased::compute_equivalence_relation(
const TransitionSystem &ts,
const Distances &distances,
int target_size) const {
vector<Bucket> buckets = partition_into_buckets(ts, distances);
return compute_abstraction(buckets, target_size);
}
}
| 4,024 |
C++
| 38.460784 | 83 | 0.578777 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/fts_factory.cc
|
#include "fts_factory.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "label_equivalence_relation.h"
#include "labels.h"
#include "merge_and_shrink_representation.h"
#include "transition_system.h"
#include "types.h"
#include "../task_proxy.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
#include <unordered_map>
#include <vector>
using namespace std;
namespace merge_and_shrink {
class FTSFactory {
const TaskProxy &task_proxy;
struct TransitionSystemData {
// The following two attributes are only used for statistics
int num_variables;
vector<int> incorporated_variables;
unique_ptr<LabelEquivalenceRelation> label_equivalence_relation;
vector<vector<int>> label_groups;
vector<vector<Transition>> transitions_by_group_id;
vector<bool> relevant_labels;
int num_states;
vector<bool> goal_states;
int init_state;
TransitionSystemData(TransitionSystemData &&other)
: num_variables(other.num_variables),
incorporated_variables(move(other.incorporated_variables)),
label_equivalence_relation(move(other.label_equivalence_relation)),
label_groups(move(other.label_groups)),
transitions_by_group_id(move(other.transitions_by_group_id)),
relevant_labels(move(other.relevant_labels)),
num_states(other.num_states),
goal_states(move(other.goal_states)),
init_state(other.init_state) {
}
TransitionSystemData() = default;
TransitionSystemData(TransitionSystemData &other) = delete;
TransitionSystemData &operator=(TransitionSystemData &other) = delete;
};
vector<TransitionSystemData> transition_system_data_by_var;
// see TODO in build_transitions()
    bool task_has_conditional_effects;
vector<unique_ptr<Label>> create_labels();
void build_state_data(VariableProxy var);
void initialize_transition_system_data(const Labels &labels);
bool is_relevant(int var_no, int label_no) const;
void mark_as_relevant(int var_no, int label_no);
unordered_map<int, int> compute_preconditions(OperatorProxy op);
void handle_operator_effect(
OperatorProxy op,
EffectProxy effect,
const unordered_map<int, int> &pre_val,
vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var);
void handle_operator_precondition(
OperatorProxy op,
FactProxy precondition,
const vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var);
void build_transitions_for_operator(OperatorProxy op);
void build_transitions_for_irrelevant_ops(VariableProxy variable);
void build_transitions();
vector<unique_ptr<TransitionSystem>> create_transition_systems(const Labels &labels);
vector<unique_ptr<MergeAndShrinkRepresentation>> create_mas_representations() const;
vector<unique_ptr<Distances>> create_distances(
const vector<unique_ptr<TransitionSystem>> &transition_systems) const;
public:
explicit FTSFactory(const TaskProxy &task_proxy);
~FTSFactory();
/*
Note: create() may only be called once. We don't worry about
misuse because the class is only used internally in this file.
*/
FactoredTransitionSystem create(
bool compute_init_distances,
bool compute_goal_distances,
utils::Verbosity verbosity);
};
FTSFactory::FTSFactory(const TaskProxy &task_proxy)
: task_proxy(task_proxy), task_has_conditional_effects(false) {
}
FTSFactory::~FTSFactory() {
}
vector<unique_ptr<Label>> FTSFactory::create_labels() {
vector<unique_ptr<Label>> result;
int num_ops = task_proxy.get_operators().size();
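    /* Reserve space for the original labels plus the labels that label
       reduction may create later: each reduction replaces at least two
       labels by one new label, so at most num_ops - 1 new labels can ever
       be added. */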
if (num_ops > 0) {
int max_num_labels = 2 * num_ops - 1;
result.reserve(max_num_labels);
}
for (OperatorProxy op : task_proxy.get_operators()) {
result.push_back(utils::make_unique_ptr<Label>(op.get_cost()));
}
return result;
}
void FTSFactory::build_state_data(VariableProxy var) {
int var_id = var.get_id();
TransitionSystemData &ts_data = transition_system_data_by_var[var_id];
ts_data.init_state = task_proxy.get_initial_state()[var_id].get_value();
int range = task_proxy.get_variables()[var_id].get_domain_size();
ts_data.num_states = range;
int goal_value = -1;
GoalsProxy goals = task_proxy.get_goals();
for (FactProxy goal : goals) {
if (goal.get_variable().get_id() == var_id) {
assert(goal_value == -1);
goal_value = goal.get_value();
break;
}
}
ts_data.goal_states.resize(range, false);
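    // goal_value == -1 means the variable is not mentioned in the goal,
    // so every value counts as a goal state.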
for (int value = 0; value < range; ++value) {
if (value == goal_value || goal_value == -1) {
ts_data.goal_states[value] = true;
}
}
}
void FTSFactory::initialize_transition_system_data(const Labels &labels) {
VariablesProxy variables = task_proxy.get_variables();
int num_labels = task_proxy.get_operators().size();
transition_system_data_by_var.resize(variables.size());
for (VariableProxy var : variables) {
TransitionSystemData &ts_data = transition_system_data_by_var[var.get_id()];
ts_data.num_variables = variables.size();
ts_data.incorporated_variables.push_back(var.get_id());
ts_data.transitions_by_group_id.reserve(labels.get_max_size());
ts_data.relevant_labels.resize(num_labels, false);
build_state_data(var);
}
}
bool FTSFactory::is_relevant(int var_no, int label_no) const {
return transition_system_data_by_var[var_no].relevant_labels[label_no];
}
void FTSFactory::mark_as_relevant(int var_no, int label_no) {
transition_system_data_by_var[var_no].relevant_labels[label_no] = true;
}
unordered_map<int, int> FTSFactory::compute_preconditions(OperatorProxy op) {
unordered_map<int, int> pre_val;
for (FactProxy precondition : op.get_preconditions())
pre_val[precondition.get_variable().get_id()] =
precondition.get_value();
return pre_val;
}
void FTSFactory::handle_operator_effect(
OperatorProxy op,
EffectProxy effect,
const unordered_map<int, int> &pre_val,
vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var) {
int label_no = op.get_id();
FactProxy fact = effect.get_fact();
VariableProxy var = fact.get_variable();
int var_no = var.get_id();
has_effect_on_var[var_no] = true;
int post_value = fact.get_value();
// Determine possible values that var can have when this
// operator is applicable.
int pre_value = -1;
auto pre_val_it = pre_val.find(var_no);
if (pre_val_it != pre_val.end())
pre_value = pre_val_it->second;
int pre_value_min, pre_value_max;
if (pre_value == -1) {
pre_value_min = 0;
pre_value_max = var.get_domain_size();
} else {
pre_value_min = pre_value;
pre_value_max = pre_value + 1;
}
/*
cond_effect_pre_value == x means that the effect has an
effect condition "var == x".
cond_effect_pre_value == -1 means no effect condition on var.
has_other_effect_cond is true iff there exists an effect
condition on a variable other than var.
*/
EffectConditionsProxy effect_conditions = effect.get_conditions();
int cond_effect_pre_value = -1;
bool has_other_effect_cond = false;
for (FactProxy condition : effect_conditions) {
if (condition.get_variable() == var) {
cond_effect_pre_value = condition.get_value();
} else {
has_other_effect_cond = true;
}
}
// Handle transitions that occur when the effect triggers.
for (int value = pre_value_min; value < pre_value_max; ++value) {
/*
Only add a transition if it is possible that the effect
triggers. We can rule out that the effect triggers if it has
a condition on var and this condition is not satisfied.
*/
if (cond_effect_pre_value == -1 || cond_effect_pre_value == value)
transitions_by_var[var_no].emplace_back(value, post_value);
}
// Handle transitions that occur when the effect does not trigger.
if (!effect_conditions.empty()) {
for (int value = pre_value_min; value < pre_value_max; ++value) {
/*
Add self-loop if the effect might not trigger.
If the effect has a condition on another variable, then
it can fail to trigger no matter which value var has.
If it only has a condition on var, then the effect
fails to trigger if this condition is false.
*/
if (has_other_effect_cond || value != cond_effect_pre_value)
transitions_by_var[var_no].emplace_back(value, value);
}
task_has_conditional_effects = true;
}
mark_as_relevant(var_no, label_no);
}
void FTSFactory::handle_operator_precondition(
OperatorProxy op,
FactProxy precondition,
const vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var) {
int label_no = op.get_id();
int var_no = precondition.get_variable().get_id();
if (!has_effect_on_var[var_no]) {
int value = precondition.get_value();
transitions_by_var[var_no].emplace_back(value, value);
mark_as_relevant(var_no, label_no);
}
}
void FTSFactory::build_transitions_for_operator(OperatorProxy op) {
/*
- Mark op as relevant in the transition systems corresponding
to variables on which it has a precondition or effect.
- Add transitions induced by op in these transition systems.
*/
unordered_map<int, int> pre_val = compute_preconditions(op);
int num_variables = task_proxy.get_variables().size();
vector<bool> has_effect_on_var(task_proxy.get_variables().size(), false);
vector<vector<Transition>> transitions_by_var(num_variables);
for (EffectProxy effect : op.get_effects())
handle_operator_effect(op, effect, pre_val, has_effect_on_var, transitions_by_var);
/*
We must handle preconditions *after* effects because handling
the effects sets has_effect_on_var.
*/
for (FactProxy precondition : op.get_preconditions())
handle_operator_precondition(op, precondition, has_effect_on_var, transitions_by_var);
int label_no = op.get_id();
for (int var_no = 0; var_no < num_variables; ++var_no) {
if (!is_relevant(var_no, label_no)) {
/*
We do not want to add transitions of irrelevant labels here,
since they are handled together in a separate step.
*/
continue;
}
vector<Transition> &transitions = transitions_by_var[var_no];
/*
            TODO: Our method for generating transitions is only guaranteed
to generate sorted and unique transitions if the task has no
conditional effects.
*/
if (task_has_conditional_effects) {
utils::sort_unique(transitions);
} else {
assert(utils::is_sorted_unique(transitions));
}
vector<vector<Transition>> &existing_transitions_by_group_id =
transition_system_data_by_var[var_no].transitions_by_group_id;
vector<vector<int>> &label_groups = transition_system_data_by_var[var_no].label_groups;
assert(existing_transitions_by_group_id.size() == label_groups.size());
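        /* If some existing label group of this variable has exactly the
           same transitions, the label joins that group; otherwise it
           starts a new group. */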
bool found_locally_equivalent_label_group = false;
for (size_t group_id = 0; group_id < existing_transitions_by_group_id.size(); ++group_id) {
const vector<Transition> &group_transitions = existing_transitions_by_group_id[group_id];
if (transitions == group_transitions) {
label_groups[group_id].push_back(label_no);
found_locally_equivalent_label_group = true;
break;
}
}
if (!found_locally_equivalent_label_group) {
existing_transitions_by_group_id.push_back(move(transitions));
label_groups.push_back({label_no});
}
}
}
void FTSFactory::build_transitions_for_irrelevant_ops(VariableProxy variable) {
int var_no = variable.get_id();
int num_states = variable.get_domain_size();
int num_labels = task_proxy.get_operators().size();
// Collect all irrelevant labels for this variable.
vector<int> irrelevant_labels;
for (int label_no = 0; label_no < num_labels; ++label_no) {
if (!is_relevant(var_no, label_no)) {
irrelevant_labels.push_back(label_no);
}
}
TransitionSystemData &ts_data = transition_system_data_by_var[var_no];
if (!irrelevant_labels.empty()) {
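        // Irrelevant labels never change this variable, so they induce a
        // self-loop at every state and all share a single label group.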
vector<Transition> transitions;
transitions.reserve(num_states);
for (int state = 0; state < num_states; ++state)
transitions.emplace_back(state, state);
ts_data.label_groups.push_back(move(irrelevant_labels));
ts_data.transitions_by_group_id.push_back(move(transitions));
}
}
void FTSFactory::build_transitions() {
/*
- Compute all transitions of all operators for all variables, grouping
transitions of locally equivalent labels for a given variable.
      - Compute relevant operator information as a side effect.
*/
for (OperatorProxy op : task_proxy.get_operators())
build_transitions_for_operator(op);
/*
Compute transitions of irrelevant operators for each variable only
once and put the labels into a single label group.
*/
for (VariableProxy variable : task_proxy.get_variables())
build_transitions_for_irrelevant_ops(variable);
}
vector<unique_ptr<TransitionSystem>> FTSFactory::create_transition_systems(const Labels &labels) {
// Create the actual TransitionSystem objects.
int num_variables = task_proxy.get_variables().size();
// We reserve space for the transition systems added later by merging.
vector<unique_ptr<TransitionSystem>> result;
assert(num_variables >= 1);
result.reserve(num_variables * 2 - 1);
for (int var_no = 0; var_no < num_variables; ++var_no) {
TransitionSystemData &ts_data = transition_system_data_by_var[var_no];
/* Construct the label equivalence relation from the previously
computed label groups. */
ts_data.label_equivalence_relation =
utils::make_unique_ptr<LabelEquivalenceRelation>(
labels, ts_data.label_groups);
result.push_back(utils::make_unique_ptr<TransitionSystem>(
ts_data.num_variables,
move(ts_data.incorporated_variables),
move(ts_data.label_equivalence_relation),
move(ts_data.transitions_by_group_id),
ts_data.num_states,
move(ts_data.goal_states),
ts_data.init_state
));
}
return result;
}
vector<unique_ptr<MergeAndShrinkRepresentation>> FTSFactory::create_mas_representations() const {
// Create the actual MergeAndShrinkRepresentation objects.
int num_variables = task_proxy.get_variables().size();
// We reserve space for the transition systems added later by merging.
vector<unique_ptr<MergeAndShrinkRepresentation>> result;
assert(num_variables >= 1);
result.reserve(num_variables * 2 - 1);
for (int var_no = 0; var_no < num_variables; ++var_no) {
int range = task_proxy.get_variables()[var_no].get_domain_size();
result.push_back(
utils::make_unique_ptr<MergeAndShrinkRepresentationLeaf>(var_no, range));
}
return result;
}
vector<unique_ptr<Distances>> FTSFactory::create_distances(
const vector<unique_ptr<TransitionSystem>> &transition_systems) const {
// Create the actual Distances objects.
int num_variables = task_proxy.get_variables().size();
// We reserve space for the transition systems added later by merging.
vector<unique_ptr<Distances>> result;
assert(num_variables >= 1);
result.reserve(num_variables * 2 - 1);
for (int var_no = 0; var_no < num_variables; ++var_no) {
result.push_back(
utils::make_unique_ptr<Distances>(*transition_systems[var_no]));
}
return result;
}
FactoredTransitionSystem FTSFactory::create(
const bool compute_init_distances,
const bool compute_goal_distances,
utils::Verbosity verbosity) {
if (verbosity >= utils::Verbosity::NORMAL) {
utils::g_log << "Building atomic transition systems... " << endl;
}
unique_ptr<Labels> labels = utils::make_unique_ptr<Labels>(create_labels());
initialize_transition_system_data(*labels);
build_transitions();
vector<unique_ptr<TransitionSystem>> transition_systems =
create_transition_systems(*labels);
vector<unique_ptr<MergeAndShrinkRepresentation>> mas_representations =
create_mas_representations();
vector<unique_ptr<Distances>> distances =
create_distances(transition_systems);
return FactoredTransitionSystem(
move(labels),
move(transition_systems),
move(mas_representations),
move(distances),
compute_init_distances,
compute_goal_distances,
verbosity);
}
FactoredTransitionSystem create_factored_transition_system(
const TaskProxy &task_proxy,
const bool compute_init_distances,
const bool compute_goal_distances,
utils::Verbosity verbosity) {
return FTSFactory(task_proxy).create(
compute_init_distances,
compute_goal_distances,
verbosity);
}
}
| 17,939 |
C++
| 37.170213 | 101 | 0.646915 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_selector.cc
|
#include "merge_selector.h"
#include "factored_transition_system.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
vector<pair<int, int>> MergeSelector::compute_merge_candidates(
const FactoredTransitionSystem &fts,
const vector<int> &indices_subset) const {
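    /* With an empty subset, consider every pair of active factors of the
       whole FTS; otherwise only pairs within the given subset, which must
       contain at least two indices. */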
vector<pair<int, int>> merge_candidates;
if (indices_subset.empty()) {
for (int ts_index1 = 0; ts_index1 < fts.get_size(); ++ts_index1) {
if (fts.is_active(ts_index1)) {
for (int ts_index2 = ts_index1 + 1; ts_index2 < fts.get_size();
++ts_index2) {
if (fts.is_active(ts_index2)) {
merge_candidates.emplace_back(ts_index1, ts_index2);
}
}
}
}
} else {
assert(indices_subset.size() > 1);
for (size_t i = 0; i < indices_subset.size(); ++i) {
int ts_index1 = indices_subset[i];
assert(fts.is_active(ts_index1));
for (size_t j = i + 1; j < indices_subset.size(); ++j) {
int ts_index2 = indices_subset[j];
assert(fts.is_active(ts_index2));
merge_candidates.emplace_back(ts_index1, ts_index2);
}
}
}
return merge_candidates;
}
void MergeSelector::dump_options() const {
utils::g_log << "Merge selector options:" << endl;
utils::g_log << "Name: " << name() << endl;
dump_specific_options();
}
static options::PluginTypePlugin<MergeSelector> _type_plugin(
"MergeSelector",
"This page describes the available merge selectors. They are used to "
"compute the next merge purely based on the state of the given factored "
"transition system. They are used in the merge strategy of type "
"'stateless', but they can also easily be used in different 'combined' "
"merged strategies.");
}
| 1,984 |
C++
| 32.644067 | 79 | 0.584173 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_stateless.cc
|
#include "merge_strategy_stateless.h"
#include "merge_selector.h"
using namespace std;
namespace merge_and_shrink {
MergeStrategyStateless::MergeStrategyStateless(
const FactoredTransitionSystem &fts,
const shared_ptr<MergeSelector> &merge_selector)
: MergeStrategy(fts),
merge_selector(merge_selector) {
}
pair<int, int> MergeStrategyStateless::get_next() {
return merge_selector->select_merge(fts);
}
}
| 431 |
C++
| 21.736841 | 52 | 0.74478 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_dfp.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_DFP_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_DFP_H
#include "merge_scoring_function.h"
namespace merge_and_shrink {
class TransitionSystem;
class MergeScoringFunctionDFP : public MergeScoringFunction {
std::vector<int> compute_label_ranks(
const FactoredTransitionSystem &fts, int index) const;
protected:
virtual std::string name() const override;
public:
MergeScoringFunctionDFP() = default;
virtual ~MergeScoringFunctionDFP() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return true;
}
};
}
#endif
| 891 |
C
| 27.774193 | 75 | 0.722783 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.cc
|
#include "merge_scoring_function_goal_relevance.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "utils.h"
#include "../options/option_parser.h"
#include "../options/plugin.h"
using namespace std;
namespace merge_and_shrink {
vector<double> MergeScoringFunctionGoalRelevance::compute_scores(
const FactoredTransitionSystem &fts,
const vector<pair<int, int>> &merge_candidates) {
int num_ts = fts.get_size();
vector<bool> goal_relevant(num_ts, false);
for (int ts_index : fts) {
const TransitionSystem &ts = fts.get_transition_system(ts_index);
if (is_goal_relevant(ts)) {
goal_relevant[ts_index] = true;
}
}
vector<double> scores;
scores.reserve(merge_candidates.size());
for (pair<int, int> merge_candidate : merge_candidates) {
int ts_index1 = merge_candidate.first;
int ts_index2 = merge_candidate.second;
int score = INF;
if (goal_relevant[ts_index1] || goal_relevant[ts_index2]) {
score = 0;
}
scores.push_back(score);
}
return scores;
}
string MergeScoringFunctionGoalRelevance::name() const {
return "goal relevance";
}
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"Goal relevance scoring",
"This scoring function assigns a merge candidate a value of 0 iff at "
"least one of the two transition systems of the merge candidate is "
"goal relevant in the sense that there is an abstract non-goal state. "
"All other candidates get a score of positive infinity.");
if (parser.dry_run())
return nullptr;
else
return make_shared<MergeScoringFunctionGoalRelevance>();
}
static options::Plugin<MergeScoringFunction> _plugin("goal_relevance", _parse);
}
| 1,867 |
C++
| 31.206896 | 79 | 0.670595 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/best_first_open_list.cc
|
#include "best_first_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/memory.h"
#include <cassert>
#include <deque>
#include <map>
using namespace std;
namespace standard_scalar_open_list {
template<class Entry>
class BestFirstOpenList : public OpenList<Entry> {
typedef deque<Entry> Bucket;
map<int, Bucket> buckets;
int size;
shared_ptr<Evaluator> evaluator;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit BestFirstOpenList(const Options &opts);
BestFirstOpenList(const shared_ptr<Evaluator> &eval, bool preferred_only);
virtual ~BestFirstOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
};
template<class Entry>
BestFirstOpenList<Entry>::BestFirstOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
size(0),
evaluator(opts.get<shared_ptr<Evaluator>>("eval")) {
}
template<class Entry>
BestFirstOpenList<Entry>::BestFirstOpenList(
const shared_ptr<Evaluator> &evaluator, bool preferred_only)
: OpenList<Entry>(preferred_only),
size(0),
evaluator(evaluator) {
}
template<class Entry>
void BestFirstOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
int key = eval_context.get_evaluator_value(evaluator.get());
buckets[key].push_back(entry);
++size;
}
template<class Entry>
Entry BestFirstOpenList<Entry>::remove_min() {
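    // buckets is an ordered map, so begin() is the bucket with the lowest
    // evaluator value; within a bucket, entries are removed in FIFO order.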
assert(size > 0);
auto it = buckets.begin();
assert(it != buckets.end());
Bucket &bucket = it->second;
assert(!bucket.empty());
Entry result = bucket.front();
bucket.pop_front();
if (bucket.empty())
buckets.erase(it);
--size;
return result;
}
template<class Entry>
bool BestFirstOpenList<Entry>::empty() const {
return size == 0;
}
template<class Entry>
void BestFirstOpenList<Entry>::clear() {
buckets.clear();
size = 0;
}
template<class Entry>
void BestFirstOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool BestFirstOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
return eval_context.is_evaluator_value_infinite(evaluator.get());
}
template<class Entry>
bool BestFirstOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
return is_dead_end(eval_context) && evaluator->dead_ends_are_reliable();
}
BestFirstOpenListFactory::BestFirstOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
BestFirstOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<BestFirstOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
BestFirstOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<BestFirstOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Best-first open list",
"Open list that uses a single evaluator and FIFO tiebreaking.");
parser.document_note(
"Implementation Notes",
"Elements with the same evaluator value are stored in double-ended "
"queues, called \"buckets\". The open list stores a map from evaluator "
"values to buckets. Pushing and popping from a bucket runs in constant "
"time. Therefore, inserting and removing an entry from the open list "
"takes time O(log(n)), where n is the number of buckets.");
parser.add_option<shared_ptr<Evaluator>>("eval", "evaluator");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
Options opts = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<BestFirstOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("single", _parse);
}
| 4,462 |
C++
| 28.169934 | 82 | 0.697221 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/tiebreaking_open_list.cc
|
#include "tiebreaking_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/memory.h"
#include <cassert>
#include <deque>
#include <map>
#include <utility>
#include <vector>
using namespace std;
namespace tiebreaking_open_list {
template<class Entry>
class TieBreakingOpenList : public OpenList<Entry> {
using Bucket = deque<Entry>;
map<const vector<int>, Bucket> buckets;
int size;
vector<shared_ptr<Evaluator>> evaluators;
/*
If allow_unsafe_pruning is true, we ignore (don't insert) states
which the first evaluator considers a dead end, even if it is
not a safe heuristic.
*/
bool allow_unsafe_pruning;
int dimension() const;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit TieBreakingOpenList(const Options &opts);
virtual ~TieBreakingOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
};
template<class Entry>
TieBreakingOpenList<Entry>::TieBreakingOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
size(0), evaluators(opts.get_list<shared_ptr<Evaluator>>("evals")),
allow_unsafe_pruning(opts.get<bool>("unsafe_pruning")) {
}
template<class Entry>
void TieBreakingOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
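    /* The key is the vector of evaluator values in the order given by
       "evals"; std::map compares keys lexicographically, so earlier
       evaluators take precedence when breaking ties. */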
vector<int> key;
key.reserve(evaluators.size());
for (const shared_ptr<Evaluator> &evaluator : evaluators)
key.push_back(eval_context.get_evaluator_value_or_infinity(evaluator.get()));
buckets[key].push_back(entry);
++size;
}
template<class Entry>
Entry TieBreakingOpenList<Entry>::remove_min() {
assert(size > 0);
typename map<const vector<int>, Bucket>::iterator it;
it = buckets.begin();
assert(it != buckets.end());
assert(!it->second.empty());
--size;
Entry result = it->second.front();
it->second.pop_front();
if (it->second.empty())
buckets.erase(it);
return result;
}
template<class Entry>
bool TieBreakingOpenList<Entry>::empty() const {
return size == 0;
}
template<class Entry>
void TieBreakingOpenList<Entry>::clear() {
buckets.clear();
size = 0;
}
template<class Entry>
int TieBreakingOpenList<Entry>::dimension() const {
return evaluators.size();
}
template<class Entry>
void TieBreakingOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool TieBreakingOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// TODO: Properly document this behaviour.
// If one safe heuristic detects a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// If the first heuristic detects a dead-end and we allow "unsafe
// pruning", return true.
if (allow_unsafe_pruning &&
eval_context.is_evaluator_value_infinite(evaluators[0].get()))
return true;
// Otherwise, return true if all heuristics agree this is a dead-end.
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (!eval_context.is_evaluator_value_infinite(evaluator.get()))
return false;
return true;
}
template<class Entry>
bool TieBreakingOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (eval_context.is_evaluator_value_infinite(evaluator.get()) &&
evaluator->dead_ends_are_reliable())
return true;
return false;
}
TieBreakingOpenListFactory::TieBreakingOpenListFactory(const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
TieBreakingOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<TieBreakingOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
TieBreakingOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<TieBreakingOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis("Tie-breaking open list", "");
parser.add_list_option<shared_ptr<Evaluator>>("evals", "evaluators");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
parser.add_option<bool>(
"unsafe_pruning",
"allow unsafe pruning when the main evaluator regards a state a dead end",
"true");
Options opts = parser.parse();
opts.verify_list_non_empty<shared_ptr<Evaluator>>("evals");
if (parser.dry_run())
return nullptr;
else
return make_shared<TieBreakingOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("tiebreaking", _parse);
}
| 5,348 |
C++
| 29.565714 | 85 | 0.690912 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/tiebreaking_open_list.h
|
#ifndef OPEN_LISTS_TIEBREAKING_OPEN_LIST_H
#define OPEN_LISTS_TIEBREAKING_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
namespace tiebreaking_open_list {
class TieBreakingOpenListFactory : public OpenListFactory {
Options options;
public:
explicit TieBreakingOpenListFactory(const Options &options);
virtual ~TieBreakingOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 575 |
C
| 27.799999 | 77 | 0.763478 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/pareto_open_list.h
|
#ifndef OPEN_LISTS_PARETO_OPEN_LIST_H
#define OPEN_LISTS_PARETO_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
namespace pareto_open_list {
class ParetoOpenListFactory : public OpenListFactory {
Options options;
public:
explicit ParetoOpenListFactory(const Options &options);
virtual ~ParetoOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 545 |
C
| 26.299999 | 77 | 0.750459 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/type_based_open_list.cc
|
#include "type_based_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/collections.h"
#include "../utils/hash.h"
#include "../utils/markup.h"
#include "../utils/memory.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <memory>
#include <unordered_map>
#include <vector>
using namespace std;
namespace type_based_open_list {
template<class Entry>
class TypeBasedOpenList : public OpenList<Entry> {
shared_ptr<utils::RandomNumberGenerator> rng;
vector<shared_ptr<Evaluator>> evaluators;
using Key = vector<int>;
using Bucket = vector<Entry>;
vector<pair<Key, Bucket>> keys_and_buckets;
utils::HashMap<Key, int> key_to_bucket_index;
protected:
virtual void do_insertion(
EvaluationContext &eval_context, const Entry &entry) override;
public:
explicit TypeBasedOpenList(const Options &opts);
virtual ~TypeBasedOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual bool is_dead_end(EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
};
template<class Entry>
void TypeBasedOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
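    // The key (the entry's "type") is the vector of evaluator values;
    // all entries with the same key share one bucket.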
vector<int> key;
key.reserve(evaluators.size());
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
key.push_back(
eval_context.get_evaluator_value_or_infinity(evaluator.get()));
}
auto it = key_to_bucket_index.find(key);
if (it == key_to_bucket_index.end()) {
key_to_bucket_index[key] = keys_and_buckets.size();
keys_and_buckets.push_back(make_pair(move(key), Bucket({entry})));
} else {
size_t bucket_index = it->second;
assert(utils::in_bounds(bucket_index, keys_and_buckets));
keys_and_buckets[bucket_index].second.push_back(entry);
}
}
template<class Entry>
TypeBasedOpenList<Entry>::TypeBasedOpenList(const Options &opts)
: rng(utils::parse_rng_from_options(opts)),
evaluators(opts.get_list<shared_ptr<Evaluator>>("evaluators")) {
}
template<class Entry>
Entry TypeBasedOpenList<Entry>::remove_min() {
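    // Pick a bucket (type) uniformly at random, then pick an entry within
    // that bucket uniformly at random.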
size_t bucket_id = (*rng)(keys_and_buckets.size());
auto &key_and_bucket = keys_and_buckets[bucket_id];
const Key &min_key = key_and_bucket.first;
Bucket &bucket = key_and_bucket.second;
int pos = (*rng)(bucket.size());
Entry result = utils::swap_and_pop_from_vector(bucket, pos);
if (bucket.empty()) {
// Swap the empty bucket with the last bucket, then delete it.
key_to_bucket_index[keys_and_buckets.back().first] = bucket_id;
key_to_bucket_index.erase(min_key);
utils::swap_and_pop_from_vector(keys_and_buckets, bucket_id);
}
return result;
}
template<class Entry>
bool TypeBasedOpenList<Entry>::empty() const {
return keys_and_buckets.empty();
}
template<class Entry>
void TypeBasedOpenList<Entry>::clear() {
keys_and_buckets.clear();
key_to_bucket_index.clear();
}
template<class Entry>
bool TypeBasedOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// If one evaluator is sure we have a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// Otherwise, return true if all evaluators agree this is a dead-end.
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
if (!eval_context.is_evaluator_value_infinite(evaluator.get()))
return false;
}
return true;
}
template<class Entry>
bool TypeBasedOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
if (evaluator->dead_ends_are_reliable() &&
eval_context.is_evaluator_value_infinite(evaluator.get()))
return true;
}
return false;
}
template<class Entry>
void TypeBasedOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
evaluator->get_path_dependent_evaluators(evals);
}
}
TypeBasedOpenListFactory::TypeBasedOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
TypeBasedOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<TypeBasedOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
TypeBasedOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<TypeBasedOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Type-based open list",
"Uses multiple evaluators to assign entries to buckets. "
"All entries in a bucket have the same evaluator values. "
"When retrieving an entry, a bucket is chosen uniformly at "
"random and one of the contained entries is selected "
"uniformly randomly. "
"The algorithm is based on" + utils::format_conference_reference(
{"Fan Xie", "Martin Mueller", "Robert Holte", "Tatsuya Imai"},
"Type-Based Exploration with Multiple Search Queues for"
" Satisficing Planning",
"http://www.aaai.org/ocs/index.php/AAAI/AAAI14/paper/view/8472/8705",
"Proceedings of the Twenty-Eigth AAAI Conference Conference"
" on Artificial Intelligence (AAAI 2014)",
"2395-2401",
"AAAI Press",
"2014"));
parser.add_list_option<shared_ptr<Evaluator>>(
"evaluators",
"Evaluators used to determine the bucket for each entry.");
utils::add_rng_options(parser);
Options opts = parser.parse();
opts.verify_list_non_empty<shared_ptr<Evaluator>>("evaluators");
if (parser.dry_run())
return nullptr;
else
return make_shared<TypeBasedOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("type_based", _parse);
}
| 6,251 |
C++
| 32.433155 | 82 | 0.677812 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/best_first_open_list.h
|
#ifndef OPEN_LISTS_BEST_FIRST_OPEN_LIST_H
#define OPEN_LISTS_BEST_FIRST_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
/*
Open list indexed by a single int, using FIFO tie-breaking.
Implemented as a map from int to deques.
*/
namespace standard_scalar_open_list {
class BestFirstOpenListFactory : public OpenListFactory {
Options options;
public:
explicit BestFirstOpenListFactory(const Options &options);
virtual ~BestFirstOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 685 |
C
| 24.407407 | 77 | 0.745985 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/alternation_open_list.h
|
#ifndef OPEN_LISTS_ALTERNATION_OPEN_LIST_H
#define OPEN_LISTS_ALTERNATION_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
namespace alternation_open_list {
class AlternationOpenListFactory : public OpenListFactory {
Options options;
public:
explicit AlternationOpenListFactory(const Options &options);
virtual ~AlternationOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 575 |
C
| 27.799999 | 77 | 0.763478 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/epsilon_greedy_open_list.cc
|
#include "epsilon_greedy_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/collections.h"
#include "../utils/markup.h"
#include "../utils/memory.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <functional>
#include <limits>
#include <memory>
using namespace std;
namespace epsilon_greedy_open_list {
template<class Entry>
class EpsilonGreedyOpenList : public OpenList<Entry> {
shared_ptr<utils::RandomNumberGenerator> rng;
struct HeapNode {
int id;
int h;
Entry entry;
HeapNode(int id, int h, const Entry &entry)
: id(id), h(h), entry(entry) {
}
bool operator>(const HeapNode &other) const {
return make_pair(h, id) > make_pair(other.h, other.id);
}
};
vector<HeapNode> heap;
shared_ptr<Evaluator> evaluator;
double epsilon;
int size;
int next_id;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit EpsilonGreedyOpenList(const Options &opts);
virtual ~EpsilonGreedyOpenList() override = default;
virtual Entry remove_min() override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool empty() const override;
virtual void clear() override;
};
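/*
  Sift the element at "pos" up towards the root to restore the heap
  property after its key has decreased. The heap is ordered with
  std::greater, i.e. the entry with the smallest (h, id) pair is at the
  front.
*/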
template<class HeapNode>
static void adjust_heap_up(vector<HeapNode> &heap, size_t pos) {
assert(utils::in_bounds(pos, heap));
while (pos != 0) {
size_t parent_pos = (pos - 1) / 2;
if (heap[pos] > heap[parent_pos]) {
break;
}
swap(heap[pos], heap[parent_pos]);
pos = parent_pos;
}
}
template<class Entry>
void EpsilonGreedyOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
heap.emplace_back(
next_id++, eval_context.get_evaluator_value(evaluator.get()), entry);
push_heap(heap.begin(), heap.end(), greater<HeapNode>());
++size;
}
template<class Entry>
EpsilonGreedyOpenList<Entry>::EpsilonGreedyOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
rng(utils::parse_rng_from_options(opts)),
evaluator(opts.get<shared_ptr<Evaluator>>("eval")),
epsilon(opts.get<double>("epsilon")),
size(0),
next_id(0) {
}
template<class Entry>
Entry EpsilonGreedyOpenList<Entry>::remove_min() {
assert(size > 0);
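    /* With probability epsilon, pick a random entry and move it to the
       front of the heap by giving it the smallest possible h value; the
       pop below then returns it instead of the true minimum. */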
if ((*rng)() < epsilon) {
int pos = (*rng)(size);
heap[pos].h = numeric_limits<int>::min();
adjust_heap_up(heap, pos);
}
pop_heap(heap.begin(), heap.end(), greater<HeapNode>());
HeapNode heap_node = heap.back();
heap.pop_back();
--size;
return heap_node.entry;
}
template<class Entry>
bool EpsilonGreedyOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
return eval_context.is_evaluator_value_infinite(evaluator.get());
}
template<class Entry>
bool EpsilonGreedyOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
return is_dead_end(eval_context) && evaluator->dead_ends_are_reliable();
}
template<class Entry>
void EpsilonGreedyOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool EpsilonGreedyOpenList<Entry>::empty() const {
return size == 0;
}
template<class Entry>
void EpsilonGreedyOpenList<Entry>::clear() {
heap.clear();
size = 0;
next_id = 0;
}
EpsilonGreedyOpenListFactory::EpsilonGreedyOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
EpsilonGreedyOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<EpsilonGreedyOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
EpsilonGreedyOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<EpsilonGreedyOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Epsilon-greedy open list",
"Chooses an entry uniformly randomly with probability "
"'epsilon', otherwise it returns the minimum entry. "
"The algorithm is based on" + utils::format_conference_reference(
{"Richard Valenzano", "Nathan R. Sturtevant",
"Jonathan Schaeffer", "Fan Xie"},
"A Comparison of Knowledge-Based GBFS Enhancements and"
" Knowledge-Free Exploration",
"http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7943/8066",
"Proceedings of the Twenty-Fourth International Conference"
" on Automated Planning and Scheduling (ICAPS 2014)",
"375-379",
"AAAI Press",
"2014"));
parser.add_option<shared_ptr<Evaluator>>("eval", "evaluator");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
parser.add_option<double>(
"epsilon",
"probability for choosing the next entry randomly",
"0.2",
Bounds("0.0", "1.0"));
utils::add_rng_options(parser);
Options opts = parser.parse();
if (parser.dry_run()) {
return nullptr;
} else {
return make_shared<EpsilonGreedyOpenListFactory>(opts);
}
}
static Plugin<OpenListFactory> _plugin("epsilon_greedy", _parse);
}
| 5,716 |
C++
| 28.776042 | 86 | 0.655003 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/epsilon_greedy_open_list.h
|
#ifndef OPEN_LISTS_EPSILON_GREEDY_OPEN_LIST_H
#define OPEN_LISTS_EPSILON_GREEDY_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
/*
Epsilon-greedy open list based on Valenzano et al. (ICAPS 2014).
With probability epsilon the next entry is selected uniformly
randomly, otherwise the minimum entry is chosen. While the original
implementation by Valenzano et al. is based on buckets (personal
communication with the authors), this implementation stores entries
in a heap. It is usually desirable to let open lists break ties in
FIFO order. When using a heap, this can be achieved without using
significantly more time by assigning increasing IDs to new entries
and using the IDs as tiebreakers for entries with identical values.
On the other hand, FIFO tiebreaking induces a substantial worst-case
runtime penalty for bucket-based implementations. In the table below
we list the worst-case time complexities for the discussed
implementation strategies.
n: number of entries
m: number of buckets
                          Buckets    Buckets (no FIFO)    Heap
    Insert entry          O(log(m))  O(log(m))            O(log(n))
    Remove random entry   O(m + n)   O(m)                 O(log(n))
    Remove minimum entry  O(log(m))  O(log(m))            O(log(n))
These results assume that the buckets are implemented as deques and
are stored in a sorted dictionary, mapping from evaluator values to
buckets. For inserting a new entry and removing the minimum entry the
bucket-based implementations need to find the correct bucket
(O(log(m))) and can then push or pop from one end of the deque
(O(1)). For returning a random entry, bucket-based implementations
need to loop over all buckets (O(m)) to find the one that contains
the randomly selected entry. If FIFO ordering is ignored, one can use
swap-and-pop to remove the entry in constant time. Otherwise, the
removal is linear in the number of entries in the bucket (O(n), since
there could be only one bucket).
*/
namespace epsilon_greedy_open_list {
class EpsilonGreedyOpenListFactory : public OpenListFactory {
Options options;
public:
explicit EpsilonGreedyOpenListFactory(const Options &options);
virtual ~EpsilonGreedyOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 2,574 |
C
| 43.396551 | 77 | 0.707848 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/alternation_open_list.cc
|
#include "alternation_open_list.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/memory.h"
#include "../utils/system.h"
#include <cassert>
#include <memory>
#include <vector>
using namespace std;
using utils::ExitCode;
namespace alternation_open_list {
template<class Entry>
class AlternationOpenList : public OpenList<Entry> {
vector<unique_ptr<OpenList<Entry>>> open_lists;
vector<int> priorities;
const int boost_amount;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit AlternationOpenList(const Options &opts);
virtual ~AlternationOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void boost_preferred() override;
virtual void get_path_dependent_evaluators(
set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
};
template<class Entry>
AlternationOpenList<Entry>::AlternationOpenList(const Options &opts)
: boost_amount(opts.get<int>("boost")) {
vector<shared_ptr<OpenListFactory>> open_list_factories(
opts.get_list<shared_ptr<OpenListFactory>>("sublists"));
open_lists.reserve(open_list_factories.size());
for (const auto &factory : open_list_factories)
open_lists.push_back(factory->create_open_list<Entry>());
priorities.resize(open_lists.size(), 0);
}
template<class Entry>
void AlternationOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
for (const auto &sublist : open_lists)
sublist->insert(eval_context, entry);
}
template<class Entry>
Entry AlternationOpenList<Entry>::remove_min() {
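    // Choose the non-empty sublist with the lowest priority; priorities
    // grow each time a list is used and shrink when a preferred-only list
    // is boosted.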
int best = -1;
for (size_t i = 0; i < open_lists.size(); ++i) {
if (!open_lists[i]->empty() &&
(best == -1 || priorities[i] < priorities[best])) {
best = i;
}
}
assert(best != -1);
const auto &best_list = open_lists[best];
assert(!best_list->empty());
++priorities[best];
return best_list->remove_min();
}
template<class Entry>
bool AlternationOpenList<Entry>::empty() const {
for (const auto &sublist : open_lists)
if (!sublist->empty())
return false;
return true;
}
template<class Entry>
void AlternationOpenList<Entry>::clear() {
for (const auto &sublist : open_lists)
sublist->clear();
}
template<class Entry>
void AlternationOpenList<Entry>::boost_preferred() {
for (size_t i = 0; i < open_lists.size(); ++i)
if (open_lists[i]->only_contains_preferred_entries())
priorities[i] -= boost_amount;
}
template<class Entry>
void AlternationOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const auto &sublist : open_lists)
sublist->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool AlternationOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// If one sublist is sure we have a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// Otherwise, return true if all sublists agree this is a dead-end.
for (const auto &sublist : open_lists)
if (!sublist->is_dead_end(eval_context))
return false;
return true;
}
template<class Entry>
bool AlternationOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const auto &sublist : open_lists)
if (sublist->is_reliable_dead_end(eval_context))
return true;
return false;
}
AlternationOpenListFactory::AlternationOpenListFactory(const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
AlternationOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<AlternationOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
AlternationOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<AlternationOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis("Alternation open list",
"alternates between several open lists.");
parser.add_list_option<shared_ptr<OpenListFactory>>(
"sublists",
"open lists between which this one alternates");
parser.add_option<int>(
"boost",
"boost value for contained open lists that are restricted "
"to preferred successors",
"0");
Options opts = parser.parse();
opts.verify_list_non_empty<shared_ptr<OpenListFactory>>("sublists");
if (parser.dry_run())
return nullptr;
else
return make_shared<AlternationOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("alt", _parse);
}
| 5,060 |
C++
| 29.305389 | 84 | 0.674901 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/type_based_open_list.h
|
#ifndef OPEN_LISTS_TYPE_BASED_OPEN_LIST_H
#define OPEN_LISTS_TYPE_BASED_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
/*
Type-based open list based on Xie et al. (AAAI 2014; see detailed
reference in plug-in documentation).
The original implementation uses a std::map for storing and looking
up buckets. Our implementation stores the buckets in a std::vector
and uses a std::unordered_map for looking up indexes in the vector.
In the table below we list the amortized worst-case time complexities
for the original implementation and the version below.
n = number of entries
m = number of buckets
Original Code below
Insert entry O(log(m)) O(1)
Remove entry O(m) O(1) # both use swap+pop
*/
namespace type_based_open_list {
class TypeBasedOpenListFactory : public OpenListFactory {
Options options;
public:
explicit TypeBasedOpenListFactory(const Options &options);
virtual ~TypeBasedOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 1,243 |
C
| 30.099999 | 77 | 0.69992 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/pareto_open_list.cc
|
#include "pareto_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/hash.h"
#include "../utils/memory.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
#include <deque>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
using namespace std;
namespace pareto_open_list {
template<class Entry>
class ParetoOpenList : public OpenList<Entry> {
shared_ptr<utils::RandomNumberGenerator> rng;
using Bucket = deque<Entry>;
using KeyType = vector<int>;
using BucketMap = utils::HashMap<KeyType, Bucket>;
using KeySet = set<KeyType>;
BucketMap buckets;
KeySet nondominated;
bool state_uniform_selection;
vector<shared_ptr<Evaluator>> evaluators;
bool dominates(const KeyType &v1, const KeyType &v2) const;
bool is_nondominated(
const KeyType &vec, KeySet &domination_candidates) const;
void remove_key(const KeyType &key);
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit ParetoOpenList(const Options &opts);
virtual ~ParetoOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
static OpenList<Entry> *_parse(OptionParser &p);
};
template<class Entry>
ParetoOpenList<Entry>::ParetoOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
rng(utils::parse_rng_from_options(opts)),
state_uniform_selection(opts.get<bool>("state_uniform_selection")),
evaluators(opts.get_list<shared_ptr<Evaluator>>("evals")) {
}
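/*
  Return true iff v1 weakly dominates v2, i.e., v1 is at most as large as
  v2 in every component and strictly smaller in at least one component
  (smaller evaluator values are better).
*/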
template<class Entry>
bool ParetoOpenList<Entry>::dominates(
const KeyType &v1, const KeyType &v2) const {
assert(v1.size() == v2.size());
bool are_different = false;
for (size_t i = 0; i < v1.size(); ++i) {
if (v1[i] > v2[i])
return false;
else if (v1[i] < v2[i])
are_different = true;
}
return are_different;
}
template<class Entry>
bool ParetoOpenList<Entry>::is_nondominated(
const KeyType &vec, KeySet &domination_candidates) const {
for (const KeyType &candidate : domination_candidates)
if (dominates(candidate, vec))
return false;
return true;
}
template<class Entry>
void ParetoOpenList<Entry>::remove_key(const KeyType &key) {
/*
We must copy the key because it is likely to live inside the
data structures from which we remove it here and hence becomes
invalid at that point.
*/
vector<int> copied_key(key);
nondominated.erase(copied_key);
buckets.erase(copied_key);
KeySet candidates;
for (const auto &bucket_pair : buckets) {
const KeyType &bucket_key = bucket_pair.first;
/*
If the estimate vector of the bucket is not already in the
set of nondominated estimate vectors and the vector was
previously dominated by key and the estimate vector is not
dominated by any vector from the set of nondominated
vectors, we add it to the candidates.
*/
if (!nondominated.count(bucket_key) &&
dominates(copied_key, bucket_key) &&
is_nondominated(bucket_key, nondominated))
candidates.insert(bucket_key);
}
for (const KeyType &candidate : candidates)
if (is_nondominated(candidate, candidates))
nondominated.insert(candidate);
}
template<class Entry>
void ParetoOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
vector<int> key;
key.reserve(evaluators.size());
for (const shared_ptr<Evaluator> &evaluator : evaluators)
key.push_back(eval_context.get_evaluator_value_or_infinity(evaluator.get()));
Bucket &bucket = buckets[key];
bool newkey = bucket.empty();
bucket.push_back(entry);
if (newkey && is_nondominated(key, nondominated)) {
/*
Delete previously nondominated keys that are now dominated
by key.
Note: this requires that nondominated is a "normal"
set (no hash set) because then iterators are not
invalidated by erase(it).
*/
auto it = nondominated.begin();
while (it != nondominated.end()) {
if (dominates(key, *it)) {
auto tmp_it = it;
++it;
nondominated.erase(tmp_it);
} else {
++it;
}
}
// Insert new key.
nondominated.insert(key);
}
}
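/*
  Select one nondominated key with a single-pass weighted reservoir
  selection: after a total weight of "seen" has been processed, the
  current key becomes the selection with probability numerator/seen.
  The oldest entry of the corresponding bucket is then returned.
*/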
template<class Entry>
Entry ParetoOpenList<Entry>::remove_min() {
typename KeySet::iterator selected = nondominated.begin();
int seen = 0;
for (typename KeySet::iterator it = nondominated.begin();
it != nondominated.end(); ++it) {
int numerator;
if (state_uniform_selection)
numerator = it->size();
else
numerator = 1;
seen += numerator;
if ((*rng)(seen) < numerator)
selected = it;
}
Bucket &bucket = buckets[*selected];
Entry result = bucket.front();
bucket.pop_front();
if (bucket.empty())
remove_key(*selected);
return result;
}
template<class Entry>
bool ParetoOpenList<Entry>::empty() const {
return nondominated.empty();
}
template<class Entry>
void ParetoOpenList<Entry>::clear() {
buckets.clear();
nondominated.clear();
}
template<class Entry>
void ParetoOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool ParetoOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// TODO: Document this behaviour.
// If one safe heuristic detects a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// Otherwise, return true if all heuristics agree this is a dead-end.
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (!eval_context.is_evaluator_value_infinite(evaluator.get()))
return false;
return true;
}
template<class Entry>
bool ParetoOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (eval_context.is_evaluator_value_infinite(evaluator.get()) &&
evaluator->dead_ends_are_reliable())
return true;
return false;
}
ParetoOpenListFactory::ParetoOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
ParetoOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<ParetoOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
ParetoOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<ParetoOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Pareto open list",
"Selects one of the Pareto-optimal (regarding the sub-evaluators) "
"entries for removal.");
parser.add_list_option<shared_ptr<Evaluator>>("evals", "evaluators");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
parser.add_option<bool>(
"state_uniform_selection",
"When removing an entry, we select a non-dominated bucket "
"and return its oldest entry. If this option is false, we select "
"uniformly from the non-dominated buckets; if the option is true, "
"we weight the buckets with the number of entries.",
"false");
utils::add_rng_options(parser);
Options opts = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<ParetoOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("pareto", _parse);
}
| 8,406 |
C++
| 30.369403 | 85 | 0.652629 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/parser.py
|
#! /usr/bin/env python
import logging
import re
from lab.parser import Parser
class CommonParser(Parser):
def add_difference(self, diff, val1, val2):
def diff_func(content, props):
if props.get(val1) is None or props.get(val2) is None:
diff_val = None
else:
diff_val = props.get(val1) - props.get(val2)
props[diff] = diff_val
self.add_function(diff_func)
def _get_flags(self, flags_string):
flags = 0
for char in flags_string:
flags |= getattr(re, char)
return flags
def add_repeated_pattern(
self, name, regex, file="run.log", required=False, type=int,
flags=""):
def find_all_occurences(content, props):
matches = re.findall(regex, content, flags=self._get_flags(flags))
if required and not matches:
logging.error("Pattern {0} not found in file {1}".format(regex, file))
props[name] = [type(m) for m in matches]
self.add_function(find_all_occurences, file=file)
def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""):
Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags)
def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""):
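        # Like add_pattern(), but the log is searched from the bottom, so
        # the last occurrence of the pattern determines the value.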
def search_from_bottom(content, props):
reversed_content = "\n".join(reversed(content.splitlines()))
match = re.search(regex, reversed_content, flags=self._get_flags(flags))
if required and not match:
logging.error("Pattern {0} not found in file {1}".format(regex, file))
if match:
props[name] = type(match.group(1))
self.add_function(search_from_bottom, file=file)
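# If the search never started, prefix the error attribute so that runs
# failing before the search phase are distinguishable in reports.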
def no_search(content, props):
if "search_start_time" not in props:
error = props.get("error")
if error is not None and error != "incomplete-search-found-no-plan":
props["error"] = "no-search-due-to-" + error
REFINEMENT_ATTRIBUTES = [
("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"),
("time_for_finding_flaws", r"Time for finding flaws: (.+)s"),
("time_for_splitting_states", r"Time for splitting states: (.+)s"),
]
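# Sum up the per-iteration refinement times collected with add_repeated_pattern().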
def compute_total_times(content, props):
for attribute, pattern in REFINEMENT_ATTRIBUTES:
props["total_" + attribute] = sum(props[attribute])
def add_time_analysis(content, props):
init_time = props.get("init_time")
if not init_time:
return
parts = []
parts.append("{init_time:.2f}:".format(**props))
for attribute, pattern in REFINEMENT_ATTRIBUTES:
time = props["total_" + attribute]
relative_time = time / init_time
parts.append("{:.2f} ({:.2f})".format(time, relative_time))
props["time_analysis"] = " ".join(parts)
def main():
parser = CommonParser()
parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float)
parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int)
parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float)
parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int)
for attribute, pattern in REFINEMENT_ATTRIBUTES:
parser.add_repeated_pattern(attribute, pattern, type=float, required=False)
parser.add_function(no_search)
parser.add_function(compute_total_times)
parser.add_function(add_time_analysis)
parser.parse()
if __name__ == "__main__":
main()
| 3,743 |
Python
| 34.657143 | 109 | 0.617686 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,786 |
Python
| 36.435443 | 82 | 0.618355 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-base", "issue880-v1"]
DRIVER_OPTIONS = ["--build", BUILD]
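# One configuration per combination of transition limit (1M/10M) and CEGAR
# subtask generator (original task vs. landmarks and goals).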
CONFIGS = [
IssueConfig(
nick + "-" + max_transitions_nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for max_transitions_nick, max_transitions in [("1M", 1000000), ("10M", 10000000)]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]),
("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
#"depot:p02.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
REFINEMENT_ATTRIBUTES +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump", "total_time_for_splitting_states", "total_time_for_finding_traces"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,796 |
Python
| 33.109756 | 134 | 0.670601 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/v2-max-transitions.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-base", "issue880-v2"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
"{nick}-{million_transitions}M".format(**locals()),
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for million_transitions in [1, 2, 5, 10]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(max_transitions=million_transitions * 10**6)]),
("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(max_transitions=million_transitions * 10**6)]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
"depot:p01.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,741 |
Python
| 32.851851 | 167 | 0.673112 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/v2-900s.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-base", "issue880-v2"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for nick, config in [
("cegar-original-900s", ["--search", "astar(cegar(subtasks=[original()], max_transitions=infinity, max_time=900))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
"depot:p01.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
REFINEMENT_ATTRIBUTES +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,503 |
Python
| 30.3 | 144 | 0.671594 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
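        # For each task solved by both algorithms, x is the value of
        # algorithm 1 and y is the ratio val2 / val1.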
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if not val1 or not val2:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,867 |
Python
| 35.490566 | 78 | 0.598397 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-v1", "issue880-v2"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick + "-" + max_transitions_nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for max_transitions_nick, max_transitions in [("1M", 1000000)]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]),
#("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
"depot:p01.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
REFINEMENT_ATTRIBUTES +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,707 |
Python
| 32.02439 | 134 | 0.668637 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue635/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step(
'publish-absolute-report', subprocess.call, ['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step(
"publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step(step_name, make_scatter_plots))
| 11,446 |
Python
| 33.068452 | 79 | 0.597152 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue635/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue635-base", "issue635-v1"]
CONFIGS = [
IssueConfig(
heuristic,
["--search", "astar({})".format(heuristic)],
driver_options=["--search-time-limit", "10m"])
for heuristic in ["hm(m=2)", "ipdb()", "cea()", "cg()"]
]
SUITE = [
'airport', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery',
'nomystery-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips',
'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips',
'woodworking-opt11-strips', 'zenotravel']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"])
exp()
| 1,814 |
Python
| 33.245282 | 77 | 0.684675 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue635/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute compares
    between two configurations. The attribute value in config 1 is shown
    on the x-axis and its relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 3,921 |
Python
| 35.654205 | 78 | 0.597042 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue635/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue635-base", "issue635-v3"]
CONFIGS = [
IssueConfig(
heuristic,
["--search", "astar({})".format(heuristic)],
driver_options=["--search-time-limit", "10m"])
for heuristic in ["hm(m=2)"]
]
SUITE = [
'airport', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery',
'nomystery-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips',
'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips',
'woodworking-opt11-strips', 'zenotravel']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"])
exp()
| 1,787 |
Python
| 32.735848 | 77 | 0.689983 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue635/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue635-base", "issue635-v2"]
CONFIGS = [
IssueConfig(
heuristic,
["--search", "astar({})".format(heuristic)],
driver_options=["--search-time-limit", "10m"])
for heuristic in ["hm(m=2)"]
]
SUITE = [
'airport', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery',
'nomystery-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips',
'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips',
'woodworking-opt11-strips', 'zenotravel']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"])
exp()
| 1,787 |
Python
| 32.735848 | 77 | 0.689983 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v4-single-cegar-allow-merging-options.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v4"]
random_seed=2018
CONFIGS = [
### cpdbs
IssueConfig('cpdbs-singlecegar-wildcardplans-allowmergingall-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-allowmergingprec-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=precondition_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-forbidmerging-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=never,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
### pho
IssueConfig('pho-singlecegar-wildcardplans-allowmergingall-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]),
IssueConfig('pho-singlecegar-wildcardplans-allowmergingprec-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=precondition_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]),
IssueConfig('pho-singlecegar-wildcardplans-forbidmerging-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=never,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('v3-parser.py')
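# v3-parser.py (not shown here) presumably extracts the single_cegar_pdbs_*
# attributes that are added to the report tables below.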
# Copy the defaults so extending the list below does not mutate
# IssueExperiment.DEFAULT_TABLE_ATTRIBUTES in place.
attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.extend([
'single_cegar_pdbs_solved_without_search',
'single_cegar_pdbs_computation_time',
'single_cegar_pdbs_timed_out',
'single_cegar_pdbs_num_iterations',
'single_cegar_pdbs_collection_num_patterns',
'single_cegar_pdbs_collection_summed_pdb_size',
])
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
exp.run_steps()
| 4,453 |
Python
| 61.732393 | 425 | 0.777229 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v16-v17-ipdb-sys.py
|
#! /usr/bin/env python3
import itertools
import math
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-base-v2", "issue1007-v16", "issue1007-v17"]
random_seed=2018
MAX_TIME=900
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
IssueConfig('cpdbs-sys2', ['--search', 'astar(cpdbs(systematic(pattern_max_size=2)),verbosity=silent)']),
IssueConfig('cpdbs-sys3', ['--search', 'astar(cpdbs(systematic(pattern_max_size=3)),verbosity=silent)']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
score_cpdbs_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
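# Hand-computed sanity check of log_score (not part of the original script):
# with min_bound=1 and max_bound=900 (the non-test MAX_TIME above), a
# computation time of 30s scores (log(30)-log(900))/(log(1)-log(900)) = 0.5,
# anything at or below 1s scores 1.0, 900s scores 0.0, and a missing value 0.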
exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score])
exp.add_fetcher('data/issue1007-v15-ipdb-sys-eval', filter_algorithm=[
f'issue1007-v15-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v15-cpdbs-sys2',
f'issue1007-v15-cpdbs-sys3',
],merge=True)
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v15", "issue1007-v16"),
("issue1007-v16", "issue1007-v17"),
("issue1007-v15", "issue1007-v17"),
("issue1007-base-v2", "issue1007-v16"),
("issue1007-base-v2", "issue1007-v17"),
],
attributes=attributes,
filter=[add_computation_time_score],
)
exp.run_steps()
| 4,860 |
Python
| 39.173553 | 808 | 0.717695 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v4.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v4"]
random_seed=2018
CONFIGS = [
### single cegar
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
### multiple cegar
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.run_steps()
| 4,902 |
Python
| 72.179103 | 808 | 0.771114 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v14-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import math
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v14"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### ipdb
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
score_cpdbs_computation_time,
cegar_num_iterations,
cegar_num_patterns,
cegar_total_pdb_size,
cegar_computation_time,
score_cegar_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
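# Note on log_score behaviour (hand-checked, not part of the original script):
# times are clamped to [1, MAX_TIME] before scoring, so anything at or below
# 1s scores 1.0, anything at or above MAX_TIME (100s outside test runs)
# scores 0.0, and a missing cegar/cpdbs time (None) also scores 0.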
exp.add_absolute_report_step(attributes=attributes)
outfile = os.path.join(
exp.eval_dir,
f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-hillclimbing.html")
name="make-comparison-tables-hillclimbing"
exp.add_report(
ComparativeReport(
[
(f'{REVISIONS[0]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}'),
],
attributes=attributes,
filter=[add_computation_time_score],
),
name=name,
outfile=outfile,
)
exp.add_step(
f"publish-{name}",
subprocess.call,
["publish", outfile],
)
outfile = os.path.join(
exp.eval_dir,
f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-cegar.html")
name="make-comparison-tables-cegar"
exp.add_report(
ComparativeReport(
[
(f'{REVISIONS[0]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'),
],
attributes=attributes,
filter=[add_computation_time_score],
),
name=name,
outfile=outfile,
)
exp.add_step(
f"publish-{name}",
subprocess.call,
["publish", outfile],
)
exp.run_steps()
| 9,274 |
Python
| 53.239766 | 808 | 0.735497 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v5.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v5"]
random_seed=2018
CONFIGS = [
### single cegar
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
### multiple cegar
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_fetcher('data/issue1007-v4-eval', merge=True)
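# issue1007-v4 was not run in this experiment; its results are merged in from
# the earlier evaluation above, and the revision list is overridden so the
# comparison tables pair v4 with v5.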
exp._revisions = ["issue1007-v4", "issue1007-v5"]
exp.add_comparison_table_step()
exp.run_steps()
| 4,895 |
Python
| 67.957746 | 808 | 0.768131 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v9-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v9"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### ipdb
    CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
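# Average each config's results over the ten seeds (2018-2027). The
# AverageAlgorithmReport helper comes from the local average_report.py (not
# shown here); it is assumed to aggregate runs whose algorithm names differ
# only in the '-s<seed>' suffix and to write averaged properties, which the
# fetcher below merges back into the evaluation data.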
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
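# The averaged algorithm names carry no seed suffix, so the config nicks are
# overridden accordingly before building the comparison tables below.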
exp._configs = [
IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v8c", "issue1007-v9"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
)
exp.run_steps()
| 4,494 |
Python
| 46.819148 | 808 | 0.727414 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v10-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v10"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### ipdb
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 4,870 |
Python
| 64.824323 | 808 | 0.76345 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v7-v7b-v7c.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v7", "issue1007-v7b", "issue1007-v7c"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v7", "issue1007-v7b"),
("issue1007-v7b", "issue1007-v7c"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
)
exp.run_steps()
| 4,186 |
Python
| 45.010989 | 808 | 0.72623 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v7-v7b-v7c-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v7", "issue1007-v7b", "issue1007-v7c"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 4,639 |
Python
| 64.352112 | 808 | 0.765036 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".git" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".git")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
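# Hypothetical usage (illustration only, not part of the original file):
#   IssueConfig("lmcut", ["--search", "astar(lmcut())"],
#               driver_options=["--search-time-limit", "5m"])
# Passed to IssueExperiment below, this yields an algorithm named
# "<revision>-lmcut" (see get_algo_nick above).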
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        # Both *revisions* and *configs* may be omitted (see the docstring
        # above), in which case no algorithms are added here.
        if revisions and configs:
            for rev in revisions:
                for config in configs:
                    self.add_algorithm(
                        get_algo_nick(rev, config.nick),
                        get_repo_base(),
                        rev,
                        config.component_options,
                        build_options=config.build_options,
                        driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, name="make-comparison-tables", revisions=[], **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
if not revisions:
revisions = self._revisions
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step(name, make_comparison_tables)
self.add_step(
f"publish-{name}", publish_comparison_tables)
def add_comparison_table_step_for_revision_pairs(
self, revision_pairs, name="make-comparison-tables-for-revision-pairs", **kwargs):
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in revision_pairs:
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in revision_pairs:
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step(name, make_comparison_tables)
self.add_step(
f"publish-{name}", publish_comparison_tables)
    def add_scatter_plot_step(self, relative=False, attributes=None, additional=()):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
if config_nick2 is not None:
name += "-" + config_nick2
print("Make scatter plot for", name)
algo1 = get_algo_nick(rev1, config_nick)
algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2)
report = ScatterPlotReport(
filter_algorithm=[algo1, algo2],
attributes=[attribute],
relative=relative,
get_category=lambda run1, run2: run1["domain"])
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
for nick1, nick2, rev1, rev2, attribute in additional:
make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2)
self.add_step(step_name, make_scatter_plots)
| 16,361 |
Python
| 37.228972 | 95 | 0.607848 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v4-a-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v4-a"]
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
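# Merge the per-seed results of the earlier issue1007-v4 experiment so that the
# reports below can pair issue1007-v4 against issue1007-v4-a (hence the
# _revisions override further down).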
exp.add_fetcher('data/issue1007-v4-multiple-seeds-eval',merge=True,filter_algorithm=[
'issue1007-v4-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v4-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
])
exp._revisions=["issue1007-v4", "issue1007-v4-a"]
exp.add_absolute_report_step(attributes=['coverage'])
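# AverageAlgorithmReport (defined in average_report.py, not shown here) is
# expected to average each attribute over the ten seed-suffixed runs and store
# the result under the algorithm name with the '-s<seed>' suffix stripped.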
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump']
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher('data/issue1007-v4-a-multiple-seeds-eval/average', merge=True)
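# Replace the per-seed configs with seed-less dummy configs so that
# add_comparison_table_step pairs up the averaged (suffix-stripped) algorithm
# names fetched above.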
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump']
)
exp.run_steps()
| 4,432 |
Python
| 48.808988 | 808 | 0.742329 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v6.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v5", "issue1007-v6"]
random_seed=2018
CONFIGS = [
### single cegar
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
### multiple cegar
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 4,806 |
Python
| 69.691175 | 808 | 0.769039 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v7-v8-v8b-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v7", "issue1007-v8", "issue1007-v8b"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 4,638 |
Python
| 64.338027 | 808 | 0.764985 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v18-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import math
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v17", "issue1007-v18"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
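# lab Attribute flags: min_wins=True means lower values are better in the
# reports; absolute=False typically restricts aggregation to tasks solved by
# all compared algorithms.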
exp.add_absolute_report_step(attributes=['coverage'])
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
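    # Illustrative values with min_bound=1.0 and max_bound=MAX_TIME (20 in a
    # full run): log_score(None) = 0 (no heuristic constructed),
    # log_score(20.0) = 0 (worst case), log_score(1.0) = 1 (best case), and
    # intermediate times scale logarithmically, e.g. log_score(sqrt(20)) ~ 0.5.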
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns,
cpdbs_total_pdb_size, cpdbs_computation_time,
score_cpdbs_computation_time, cegar_num_iterations,
cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
filter=[add_computation_time_score],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step(
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size,
cpdbs_computation_time, score_cpdbs_computation_time,
cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
filter=[add_computation_time_score],
)
exp.run_steps()
| 6,635 |
Python
| 47.086956 | 808 | 0.716654 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/cpdbs-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('cpdbs_num_patterns', r'Canonical PDB heuristic number of patterns: (\d+)', required=False, type=int)
parser.add_pattern('cpdbs_total_pdb_size', r'Canonical PDB heuristic total PDB size: (\d+)', required=False, type=int)
parser.add_pattern('cpdbs_computation_time', r'Canonical PDB heuristic computation time: (.+)s', required=False, type=float)
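# Illustrative log lines these patterns are written to match (format assumed
# from the regexes above):
#   Canonical PDB heuristic number of patterns: 12
#   Canonical PDB heuristic total PDB size: 152340
#   Canonical PDB heuristic computation time: 1.23s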
parser.parse()
| 451 |
Python
| 40.090905 | 123 | 0.742794 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v3-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('single_cegar_pdbs_computation_time', 'CEGAR_PDBs: computation time: (.+)s', required=False, type=float)
parser.add_pattern('single_cegar_pdbs_num_iterations', r'CEGAR_PDBs: number of iterations: (\d+)', required=False, type=int)
parser.add_pattern('single_cegar_pdbs_collection_num_patterns', 'CEGAR_PDBs: final collection number of patterns: (.+)', required=False, type=int)
parser.add_pattern('single_cegar_pdbs_collection_summed_pdb_size', 'CEGAR_PDBs: final collection summed PDB sizes: (.+)', required=False, type=int)
def parse_lines(content, props):
single_cegar_pdbs_timed_out = False
single_cegar_pdbs_solved_without_search = False
for line in content.split('\n'):
if line == 'CEGAR_PDBs: time limit reached':
single_cegar_pdbs_timed_out = True
if line == 'CEGAR_PDBs: task solved during computation of abstract solutions':
single_cegar_pdbs_solved_without_search = True
props['single_cegar_pdbs_timed_out'] = single_cegar_pdbs_timed_out
props['single_cegar_pdbs_solved_without_search'] = single_cegar_pdbs_solved_without_search
parser.add_function(parse_lines)
parser.parse()
| 1,241 |
Python
| 48.679998 | 147 | 0.720387 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v13-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v13"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### ipdb
    CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
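# Re-apply all registered parsers to the existing run directories without
# re-running the planner (presumably to pick up the cpdbs/cegar parser
# attributes for runs that already finished).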
exp.add_parse_again_step()
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', cegar_num_iterations,
cegar_num_patterns, cegar_total_pdb_size, cegar_computation_time],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v8c", "issue1007-v13"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', cegar_num_iterations,
cegar_num_patterns, cegar_total_pdb_size, cegar_computation_time],
)
exp.run_steps()
| 5,167 |
Python
| 47.754717 | 808 | 0.732727 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v4-b-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v4-b"]
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_fetcher('data/issue1007-v4-a-multiple-seeds-eval',merge=True,filter_algorithm=[
'issue1007-v4-a-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v4-a-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
])
exp._revisions=["issue1007-v4-a", "issue1007-v4-b"]
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump']
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher('data/issue1007-v4-b-multiple-seeds-eval/average', merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump']
)
exp.run_steps()
| 4,392 |
Python
| 48.35955 | 808 | 0.740893 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v4-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v4"]
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.run_steps()
| 3,284 |
Python
| 51.98387 | 808 | 0.75335 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v5-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v5"]
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
### compare against v4
exp.add_fetcher('data/issue1007-v4-multiple-seeds-eval',merge=True,filter_algorithm=[
'issue1007-v4-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v4-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump'],
filter_algorithm=[
'issue1007-v4-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v4-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v5-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v5-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
],
),
outfile=os.path.join(exp.eval_dir, "average-v4-v5", "properties"),
name="report-average-v4-v5"
)
exp.add_fetcher('data/issue1007-v5-multiple-seeds-eval/average-v4-v5', merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump'],
name="compare-v4-v5",
revisions=["issue1007-v4", "issue1007-v5"],
)
### compare against v4-b
exp.add_fetcher('data/issue1007-v4-b-multiple-seeds-eval',merge=True,filter_algorithm=[
'issue1007-v4-b-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v4-b-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump'],
filter_algorithm=[
'issue1007-v4-b-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v4-b-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v5-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v5-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
],
),
outfile=os.path.join(exp.eval_dir, "average-v4-b-v5", "properties"),
name="report-average-v4-b-v5"
)
exp.add_fetcher('data/issue1007-v5-multiple-seeds-eval/average-v4-b-v5', merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump'],
name="compare-v4-b-v5",
revisions=["issue1007-v4-b", "issue1007-v5"],
)
exp.run_steps()
| 7,036 |
Python
| 49.992753 | 808 | 0.719301 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v18-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import math
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v17", "issue1007-v18"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
score_cpdbs_computation_time,
cegar_num_iterations,
cegar_num_patterns,
cegar_total_pdb_size,
cegar_computation_time,
score_cegar_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score])
exp.add_comparison_table_step(
attributes=attributes,
filter=[add_computation_time_score],
)
exp.run_steps()
| 7,019 |
Python
| 54.714285 | 808 | 0.751674 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v10-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v10"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### ipdb
    CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v8c", "issue1007-v10"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
)
exp.run_steps()
| 4,496 |
Python
| 46.840425 | 808 | 0.727536 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v3-single-cegar-wildcard.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v2", "issue1007-v3"]
CONFIGS = []
for random_seed in range(2018, 2028):
CONFIGS.append(IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('v3-parser.py')
attributes=exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend([
'single_cegar_pdbs_solved_without_search',
'single_cegar_pdbs_computation_time',
'single_cegar_pdbs_timed_out',
'single_cegar_pdbs_num_iterations',
'single_cegar_pdbs_collection_num_patterns',
'single_cegar_pdbs_collection_summed_pdb_size',
])
exp.add_absolute_report_step()
report = AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
directory=os.path.join('data', exp.name + '-average-eval'),
attributes=['coverage', 'single_cegar_pdbs_solved_without_search',
'single_cegar_pdbs_computation_time', 'search_time', 'total_time',
'expansions_until_last_jump']
)
outfile = os.path.join(exp.eval_dir, "dummy.txt")
exp.add_report(report, outfile=outfile, name="report-average")
exp.run_steps()
| 2,897 |
Python
| 39.249999 | 414 | 0.751467 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v7-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v7"]
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},total_max_time=20,stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
### compare against v6
exp.add_fetcher('data/issue1007-v6-multiple-seeds-eval',merge=True,filter_algorithm=[
'issue1007-v6-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v6-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump'],
filter_algorithm=[
'issue1007-v6-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v6-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v7-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v7-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
],
),
outfile=os.path.join(exp.eval_dir, "average-v6-v7", "properties"),
name="report-average-v6-v7"
)
exp.add_fetcher('data/issue1007-v7-multiple-seeds-eval/average-v6-v7', merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump'],
name="compare-v6-v7",
revisions=["issue1007-v6", "issue1007-v7"],
)
exp.run_steps()
| 5,086 |
Python
| 49.87 | 808 | 0.729257 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/cegar-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('cegar_num_iterations', r'CEGAR number of iterations: (\d+)', required=False, type=int)
parser.add_pattern('cegar_num_patterns', r'CEGAR number of patterns: (\d+)', required=False, type=int)
parser.add_pattern('cegar_total_pdb_size', r'CEGAR total PDB size: (\d+)', required=False, type=int)
parser.add_pattern('cegar_computation_time', r'CEGAR computation time: (.+)s', required=False, type=float)
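# The patterns above are assumed to match planner log lines of the following
# shape (values are illustrative, derived from the regexes rather than from
# actual planner output):
#   CEGAR number of iterations: 42
#   CEGAR number of patterns: 7
#   CEGAR total PDB size: 123456
#   CEGAR computation time: 1.5s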
parser.parse()
| 503 |
Python
| 40.999997 | 105 | 0.725646 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v14-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import math
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v14"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### ipdb
CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])),
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
exp.add_absolute_report_step(attributes=['coverage'])
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
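# Worked example for the scoring above (illustrative numbers, not taken from
# any experiment): with min_bound=1.0 and max_bound=MAX_TIME (20 in a full
# run), a computation time of at most 1 second scores 1.0, a time of 20
# seconds or more scores 0.0, a missing value scores 0, and an intermediate
# value such as sqrt(20) ~= 4.47 seconds scores 0.5, because log_score
# interpolates log(value) linearly between log(max_bound) and log(min_bound).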
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns,
cpdbs_total_pdb_size, cpdbs_computation_time,
score_cpdbs_computation_time, cegar_num_iterations,
cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
filter=[add_computation_time_score],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
(f"{REVISIONS[0]}", f"{REVISIONS[1]}"),
],
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size,
cpdbs_computation_time, score_cpdbs_computation_time,
cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
)
exp.run_steps()
| 7,043 |
Python
| 47.916666 | 808 | 0.714184 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v7c-v8.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v7c", "issue1007-v8"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v7c", "issue1007-v8"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
)
exp.run_steps()
| 4,125 |
Python
| 44.844444 | 808 | 0.728242 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v1.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-base", "issue1007-v1"]
random_seed=2018
CONFIGS = [
### single cegar
IssueConfig('cpdbs-single-cegar-allgoals-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=false,initial=all_goals,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)))'.format(random_seed)]),
IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=true,initial=all_goals,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)))'.format(random_seed)]),
### multiple cegar
IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]),
IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]),
IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]),
IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]),
IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]),
IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]),
IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]),
IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 6,096 |
Python
| 91.378786 | 466 | 0.784121 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v16-v17-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import math
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v16", "issue1007-v17"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
exp.add_absolute_report_step(attributes=['coverage'])
### compare against v15
exp.add_fetcher('data/issue1007-v15-multiple-seeds-eval',merge=True,filter_algorithm=[
'issue1007-v15-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v15-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
])
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns,
cpdbs_total_pdb_size, cpdbs_computation_time,
score_cpdbs_computation_time, cegar_num_iterations,
cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
filter=[add_computation_time_score],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v15", "issue1007-v16"),
("issue1007-v15", "issue1007-v17"),
("issue1007-v16", "issue1007-v17"),
],
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size,
cpdbs_computation_time, score_cpdbs_computation_time,
cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
filter=[add_computation_time_score],
)
exp.run_steps()
| 7,218 |
Python
| 47.126666 | 808 | 0.715295 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v16-v17-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import math
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v16", "issue1007-v17"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
score_cpdbs_computation_time,
cegar_num_iterations,
cegar_num_patterns,
cegar_total_pdb_size,
cegar_computation_time,
score_cegar_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score])
exp.add_fetcher('data/issue1007-v15-fixed-seed-eval', filter_algorithm=[
f'issue1007-v15-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v15-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v15-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v15-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v15-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}',
f'issue1007-v15-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}',
],merge=True)
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v15", "issue1007-v16"),
("issue1007-v15", "issue1007-v17"),
("issue1007-v16", "issue1007-v17"),
],
attributes=attributes,
filter=[add_computation_time_score],
)
exp.run_steps()
| 7,861 |
Python
| 55.157142 | 808 | 0.750286 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v3-single-cegar-wildcard-average.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
REVISIONS = ["issue1007-v2", "issue1007-v3"]
CONFIGS = [
IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100', []),
]
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
)
exp.add_comparison_table_step(
attributes=['coverage', 'single_cegar_pdbs_solved_without_search',
'single_cegar_pdbs_computation_time', 'search_time', 'total_time',
'expansions_until_last_jump']
)
exp.run_steps()
| 858 |
Python
| 25.030302 | 84 | 0.754079 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v7-v8-v8b.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v7", "issue1007-v8", "issue1007-v8b"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v7", "issue1007-v8"),
("issue1007-v8", "issue1007-v8b"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
)
exp.run_steps()
| 4,183 |
Python
| 44.978021 | 808 | 0.726034 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v9-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v9"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### ipdb
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 4,869 |
Python
| 64.81081 | 808 | 0.763401 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v15-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import math
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v15"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
exp.add_absolute_report_step(attributes=['coverage'])
### compare against v14
exp.add_fetcher('data/issue1007-v14-multiple-seeds-eval',merge=True,filter_algorithm=[
'issue1007-v14-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{}'.format(random_seed) for random_seed in range(2018, 2028)
] + [
'issue1007-v14-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028)
])
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns,
cpdbs_total_pdb_size, cpdbs_computation_time,
score_cpdbs_computation_time, cegar_num_iterations,
cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
filter=[add_computation_time_score],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v14", "issue1007-v15"),
],
attributes=[
'coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory', 'score_expansions',
'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size,
cpdbs_computation_time, score_cpdbs_computation_time,
cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size,
cegar_computation_time, score_cegar_computation_time,
],
filter=[add_computation_time_score],
)
exp.run_steps()
| 7,113 |
Python
| 47.067567 | 808 | 0.717419 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v6-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v5", "issue1007-v6"]
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump']
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher('data/issue1007-v6-multiple-seeds-eval/average', merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump']
)
exp.run_steps()
| 3,967 |
Python
| 47.390243 | 808 | 0.739854 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v2-best-average.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
REVISIONS = ["issue1007-v1", "issue1007-v2"]
CONFIGS = [
IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20', []),
]
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
)
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time', 'expansions_until_last_jump']
)
exp.run_steps()
| 783 |
Python
| 24.290322 | 98 | 0.759898 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/average_report.py
|
# -*- coding: utf-8 -*-
from downward.reports import PlanningReport
from lab import tools
from lab.reports import geometric_mean
import os
DEBUG=False
class AverageAlgorithmReport(PlanningReport):
"""
This currently only works for some hard-coded attributes.
"""
def __init__(self, algo_name_suffixes, **kwargs):
PlanningReport.__init__(self, **kwargs)
self.algo_name_suffixes=algo_name_suffixes
def get_text(self):
if not self.outfile.endswith("properties"):
raise ValueError("outfile must be a path to a properties file")
algo_infixes = set()
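        # Group seed-specific algorithm names under their common infix, e.g. a
        # (hypothetical) algorithm name 'issue1007-v6-cpdbs-...-s2018' with
        # suffix '-s2018' contributes the infix 'issue1007-v6-cpdbs-...'.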
for algo in self.algorithms:
for suffix in self.algo_name_suffixes:
if suffix in algo:
algo_infixes.add(algo.replace(suffix, ''))
break
# print(algo_infixes)
# print(self.algo_name_suffixes)
props = tools.Properties(self.outfile)
for domain, problem in self.problem_runs.keys():
if DEBUG:
print(domain, problem)
for algo in algo_infixes:
if DEBUG:
print("Consider ", algo)
properties_key = algo + '-' + domain + '-' + problem
average_algo_dict = {}
average_algo_dict['algorithm'] = algo
average_algo_dict['domain'] = domain
average_algo_dict['problem'] = problem
average_algo_dict['id'] = [algo, domain, problem]
for attribute in self.attributes:
if DEBUG:
print("Consider ", attribute)
values = []
for suffix in self.algo_name_suffixes:
real_algo = algo + suffix
# if DEBUG:
# print("Composed algo ", real_algo)
real_algo_run = self.runs[(domain, problem, real_algo)]
values.append(real_algo_run.get(attribute))
if DEBUG:
print(values)
values_without_none = [value for value in values if value is not None]
                    if attribute in [
                        'coverage', 'initial_h_value',
                        'cpdbs_num_patterns', 'cpdbs_total_pdb_size',
                        'cegar_num_iterations', 'cegar_num_patterns',
                        'cegar_total_pdb_size',
                    ] or 'score' in attribute:
# if 'score' not in attribute:
# assert len(values_without_none) == 10 # does not hold for scores
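                        # Dividing by len(values) (all seeds) rather than
                        # len(values_without_none) means runs without a value
                        # count as 0 in the average.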
average_value = sum(values_without_none)/float(len(values))
elif 'time' in attribute or 'expansions' in attribute:
if len(values_without_none) == 10:
average_value = geometric_mean(values_without_none)
else:
average_value = None
else:
print("Don't know how to handle {}".format(attribute))
exit(1)
average_algo_dict[attribute] = average_value
props[properties_key] = average_algo_dict
return str(props)
| 3,535 |
Python
| 43.759493 | 94 | 0.490523 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v13-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v13"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### ipdb
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
cegar_num_iterations,
cegar_num_patterns,
cegar_total_pdb_size,
cegar_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
exp.add_parse_again_step()
exp.add_absolute_report_step(attributes=attributes)
outfile = os.path.join(
exp.eval_dir,
f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-hillclimbing.html")
name="make-comparison-tables-hillclimbing"
exp.add_report(
ComparativeReport(
[
(f'{REVISIONS[0]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}'),
],
attributes=attributes,
),
name=name,
outfile=outfile,
)
exp.add_step(
f"publish-{name}",
subprocess.call,
["publish", outfile],
)
outfile = os.path.join(
exp.eval_dir,
f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-cegar.html")
name="make-comparison-tables-cegar"
exp.add_report(
ComparativeReport(
[
(f'{REVISIONS[0]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'),
(f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'),
],
attributes=attributes,
),
name=name,
outfile=outfile,
)
exp.add_step(
f"publish-{name}",
subprocess.call,
["publish", outfile],
)
exp.run_steps()
| 8,017 |
Python
| 54.680555 | 808 | 0.743794 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v11-v12-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8c", "issue1007-v10", "issue1007-v11", "issue1007-v12"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### ipdb
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v10", "issue1007-v11"),
("issue1007-v11", "issue1007-v12"),
("issue1007-v8c", "issue1007-v12"),
],
)
exp.run_steps()
| 5,084 |
Python
| 62.562499 | 808 | 0.755901 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v2-best.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v1", "issue1007-v2"]
CONFIGS = []
for random_seed in range(2018, 2028):
CONFIGS.append(IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)]))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
report = AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
directory=os.path.join('data', exp.name + '-average-eval'),
attributes=['coverage', 'search_time', 'total_time', 'expansions_until_last_jump'])
outfile = os.path.join(exp.eval_dir, "dummy.txt")
exp.add_report(report, outfile=outfile, name="report-average")
exp.run_steps()
| 2,576 |
Python
| 42.677965 | 498 | 0.760481 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v3.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v2", "issue1007-v3"]
random_seed=2018
CONFIGS = [
### single cegar
IssueConfig('cpdbs-single-cegar-allgoals-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)),verbosity=silent)'.format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 2,399 |
Python
| 42.636363 | 414 | 0.764068 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v11-v12-multiple-seeds.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v11", "issue1007-v12"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=1
CONFIGS = []
for random_seed in range(2018, 2028):
### ipdb
CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])),
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v11", "issue1007-v12"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
)
exp.run_steps()
| 4,496 |
Python
| 46.840425 | 808 | 0.727536 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v8b-v8c-v8d.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8b", "issue1007-v8c", "issue1007-v8d"]
MAX_TIME=20
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = []
for random_seed in range(2018, 2028):
### single cegar
CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
### multiple cegar
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m']))
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['coverage'])
exp.add_report(
AverageAlgorithmReport(
algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
),
outfile=os.path.join(exp.eval_dir, "average", "properties"),
name="report-average"
)
exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True)
exp._configs = [
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []),
]
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v8b", "issue1007-v8c"),
("issue1007-v8c", "issue1007-v8d"),
],
attributes=['coverage', 'search_time', 'total_time',
'expansions_until_last_jump', 'score_search_time',
'score_total_time', 'score_memory'],
)
exp.run_steps()
| 4,188 |
Python
| 45.032967 | 808 | 0.726361 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v8b-v8c-v8d-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v8b", "issue1007-v8c", "issue1007-v8d"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 4,640 |
Python
| 64.366196 | 808 | 0.765086 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v15-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import math
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v15"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
exp.add_parser('cegar-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True)
cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True)
cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True)
cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True)
score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
score_cpdbs_computation_time,
cegar_num_iterations,
cegar_num_patterns,
cegar_total_pdb_size,
cegar_computation_time,
score_cegar_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
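# Illustrative behaviour of the inner log_score above (hypothetical values):
# with min_bound=1.0 and max_bound=100, it maps 1.0 -> 1.0, 100.0 -> 0.0 and
# 10.0 -> 0.5, so each factor-of-10 slowdown costs 0.5 on this scale; a
# missing computation time (None) is scored 0.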
exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score])
exp.add_fetcher('data/issue1007-v14-fixed-seed-eval', filter_algorithm=[
f'issue1007-v14-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v14-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v14-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v14-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}',
f'issue1007-v14-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}',
f'issue1007-v14-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}',
],merge=True)
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v14", "issue1007-v15"),
],
attributes=attributes,
filter=[add_computation_time_score],
)
exp.run_steps()
| 7,756 |
Python
| 55.210145 | 808 | 0.752708 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v15-ipdb-sys.py
|
#! /usr/bin/env python3
import itertools
import math
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-base-v2", "issue1007-v15"]
random_seed=2018
MAX_TIME=900
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
IssueConfig('cpdbs-sys2', ['--search', 'astar(cpdbs(systematic(pattern_max_size=2)),verbosity=silent)']),
IssueConfig('cpdbs-sys3', ['--search', 'astar(cpdbs(systematic(pattern_max_size=3)),verbosity=silent)']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
score_cpdbs_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
def add_computation_time_score(run):
"""
    Convert cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
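# Illustrative behaviour of the inner log_score above (hypothetical values):
# with min_bound=1.0 and max_bound=900, it maps 1.0 -> 1.0, 900.0 -> 0.0 and
# 30.0 -> 0.5 (30 being the geometric mean of 1 and 900); a missing
# computation time (None) is scored 0.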
exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score])
exp.add_comparison_table_step(attributes=attributes,filter=[add_computation_time_score])
exp.run_steps()
| 4,328 |
Python
| 40.625 | 808 | 0.729205 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v7c-v8-fixed-seed.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v7c", "issue1007-v8"]
random_seed=2018
MAX_TIME=100
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
### single cegar
IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']),
### multiple cegar
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]),
IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 4,622 |
Python
| 64.112675 | 808 | 0.765253 |