file_path
stringlengths 20
207
| content
stringlengths 5
3.85M
| size
int64 5
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/doc_utils.cc
|
#include "doc_utils.h"
#include "option_parser.h"
#include "predefinitions.h"
using namespace std;
namespace options {
// Store the metadata describing one plugin type: its C++ type index,
// user-facing name, documentation text, optional command line
// predefinition key and alias, and the function used to predefine
// plugins of this type.
PluginTypeInfo::PluginTypeInfo(
    const type_index &type,
    const string &type_name,
    const string &documentation,
    const string &predefinition_key,
    const string &alias,
    const PredefinitionFunction &predefinition_function)
    : type(type),
      type_name(type_name),
      documentation(documentation),
      predefinition_key(predefinition_key),
      alias(alias),
      predefinition_function(predefinition_function) {
}
// Order plugin type infos primarily by user-facing name, breaking ties
// with the type index (same ordering as the former make_pair comparison).
bool PluginTypeInfo::operator<(const PluginTypeInfo &other) const {
    if (type_name != other.type_name)
        return type_name < other.type_name;
    return type < other.type;
}
}
| 719 |
C++
| 24.714285 | 79 | 0.716273 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/doc_printer.h
|
#ifndef OPTIONS_DOC_PRINTER_H
#define OPTIONS_DOC_PRINTER_H
#include "registries.h"
#include <iostream>
#include <string>
#include <vector>
namespace options {
struct PluginInfo;
class Registry;
/*
  Abstract base class for writing the documentation of the plugins in a
  Registry to an output stream. Subclasses implement the formatting
  hooks declared below for a concrete output format.
*/
class DocPrinter {
    virtual void print_category(const std::string &plugin_type_name,
                                const std::string &synopsis,
                                const std::string &predefinition_key,
                                const std::string &alias);
    virtual void print_section(const std::string &group, const std::vector<PluginInfo> &infos);
    virtual void print_plugin(const std::string &name, const PluginInfo &info);

protected:
    // Stream all documentation is written to.
    std::ostream &os;
    // Registry whose plugin and plugin type infos are documented.
    // (Fixed encoding corruption: "&reg;" mojibake in "&registry".)
    Registry &registry;

    virtual void print_synopsis(const PluginInfo &info) = 0;
    virtual void print_usage(const std::string &name, const PluginInfo &info) = 0;
    virtual void print_arguments(const PluginInfo &info) = 0;
    virtual void print_notes(const PluginInfo &info) = 0;
    virtual void print_language_features(const PluginInfo &info) = 0;
    virtual void print_properties(const PluginInfo &info) = 0;
    virtual void print_category_header(const std::string &category_name) = 0;
    virtual void print_category_synopsis(const std::string &synopsis) = 0;
    virtual void print_category_predefinitions(
        const std::string &predefinition_key, const std::string &alias) = 0;
    virtual void print_category_footer() = 0;

public:
    DocPrinter(std::ostream &out, Registry &registry);
    virtual ~DocPrinter();

    // Print documentation for everything in the registry.
    void print_all();
    // Print documentation for the single plugin registered under name.
    void print_plugin(const std::string &name);
};
// DocPrinter that emits txt2tags markup.
// (Fixed encoding corruption: "&reg;" mojibake in the constructor's
// "&registry" parameter.)
class Txt2TagsPrinter : public DocPrinter {
protected:
    virtual void print_synopsis(const PluginInfo &info) override;
    virtual void print_usage(const std::string &name, const PluginInfo &info) override;
    virtual void print_arguments(const PluginInfo &info) override;
    virtual void print_notes(const PluginInfo &info) override;
    virtual void print_language_features(const PluginInfo &info) override;
    virtual void print_properties(const PluginInfo &info) override;
    virtual void print_category_header(const std::string &category_name) override;
    virtual void print_category_synopsis(const std::string &synopsis) override;
    virtual void print_category_predefinitions(
        const std::string &predefinition_key, const std::string &alias) override;
    virtual void print_category_footer() override;

public:
    Txt2TagsPrinter(std::ostream &out, Registry &registry);
};
// DocPrinter that emits plain text.
// (Fixed encoding corruption: "&reg;" mojibake in the constructor's
// "&registry" parameter.)
class PlainPrinter : public DocPrinter {
    // If this is false, notes, properties and language_features are omitted.
    bool print_all;

protected:
    virtual void print_synopsis(const PluginInfo &info) override;
    virtual void print_usage(const std::string &name, const PluginInfo &info) override;
    virtual void print_arguments(const PluginInfo &info) override;
    virtual void print_notes(const PluginInfo &info) override;
    virtual void print_language_features(const PluginInfo &info) override;
    virtual void print_properties(const PluginInfo &info) override;
    virtual void print_category_header(const std::string &category_name) override;
    virtual void print_category_synopsis(const std::string &synopsis) override;
    virtual void print_category_predefinitions(
        const std::string &predefinition_key, const std::string &alias) override;
    virtual void print_category_footer() override;

public:
    PlainPrinter(std::ostream &out, Registry &registry, bool print_all = false);
};
}
#endif
| 3,526 |
C
| 38.188888 | 95 | 0.718378 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/any.h
|
#ifndef OPTIONS_ANY_H
#define OPTIONS_ANY_H
#include "../utils/memory.h"
#include <algorithm>
#include <exception>
#include <memory>
#include <typeinfo>
/*
Poor man's version of boost::any, mostly copied from there.
Does not support
- construction from literals (e.g., "Any a(3);")
- moving
- references as values
- perfect forwarding.
These features can be added if needed (see boost::any).
Note that we don't use Boost's version of Any mainly because it would
require adding lots of files.
*/
namespace options {
/*
  Type-erased value holder: stores a copy of a copy-constructible value
  and remembers its dynamic type for checked retrieval via any_cast.
*/
class Any {
    // Interface of the type-erased storage.
    class Placeholder {
public:
        virtual ~Placeholder() {}
        virtual std::unique_ptr<Placeholder> clone() const = 0;
        virtual const std::type_info &type() const = 0;
    };

    // Concrete storage for a value of type ValueType.
    template<typename ValueType>
    class Holder : public Placeholder {
        Holder &operator=(const Holder &) = delete;
public:
        ValueType held;

        Holder(const ValueType &value)
            : held(value) {
        }

        virtual std::unique_ptr<Placeholder> clone() const {
            return utils::make_unique_ptr<Holder<ValueType>>(held);
        }

        virtual const std::type_info &type() const {
            return typeid(ValueType);
        }
    };

    // The pointer-based any_cast needs direct access to content.
    template<typename ValueType>
    friend ValueType *any_cast(Any *);

    // Owned type-erased value; nullptr means "empty".
    std::unique_ptr<Placeholder> content;

public:
    Any() : content(nullptr) {
    }

    // Copying deep-copies the held value (if any).
    Any(const Any &other)
        : content(other.content ? other.content->clone() : nullptr) {
    }

    // Implicit conversion from any value type: stores a copy.
    template<typename ValueType>
    Any(const ValueType &value)
        : content(utils::make_unique_ptr<Holder<ValueType>>(value)) {
    }

    ~Any() = default;

    // Copy-and-swap assignment from a plain value.
    template<typename ValueType>
    Any &operator=(const ValueType &rhs) {
        Any(rhs).swap(*this);
        return *this;
    }

    // Copy-and-swap assignment from another Any.
    Any &operator=(const Any &rhs) {
        Any copied(rhs);
        copied.swap(*this);
        return *this;
    }

    Any &swap(Any &rhs) {
        std::swap(content, rhs.content);
        return *this;
    }

    // typeid of the held value, or typeid(void) if empty.
    const std::type_info &type() const {
        return content ? content->type() : typeid(void);
    }
};
// Exception thrown by the value-returning any_cast overloads when the
// requested type does not match the stored type.
class BadAnyCast : public std::bad_cast {
public:
    const char *what() const noexcept override {
        return "BadAnyCast: failed conversion using any_cast";
    }
};
/*
  Pointer overload: returns a pointer to the held value if operand is
  non-null and holds exactly ValueType, otherwise nullptr.
*/
template<typename ValueType>
ValueType *any_cast(Any *operand) {
    if (operand && operand->type() == typeid(ValueType))
        return &static_cast<Any::Holder<ValueType> *>(operand->content.get())->held;
    else
        return nullptr;
}

// Const-pointer overload, implemented via the mutable-pointer one.
template<typename ValueType>
inline const ValueType *any_cast(const Any *operand) {
    return any_cast<ValueType>(const_cast<Any *>(operand));
}

// Reference overload: returns a copy of the held value, or throws
// BadAnyCast if the stored type is not ValueType.
template<typename ValueType>
ValueType any_cast(Any &operand) {
    ValueType *result = any_cast<ValueType>(&operand);
    if (!result)
        throw BadAnyCast();
    return *result;
}

// Const-reference overload.
template<typename ValueType>
inline ValueType any_cast(const Any &operand) {
    return any_cast<const ValueType>(const_cast<Any &>(operand));
}
}
/*
This source file was derived from the boost::any library versions 1.45 by
Kevlin Henney. Original copyright statement and license for this original
source follow.
Copyright Kevlin Henney, 2000, 2001, 2002. All rights reserved.
Distributed under the Boost Software License, Version 1.0.
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#endif
| 4,671 |
C
| 27.662577 | 84 | 0.68936 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/type_namer.h
|
#ifndef OPTIONS_TYPE_NAMER_H
#define OPTIONS_TYPE_NAMER_H
#include "parse_tree.h"
#include "registries.h"
#include <memory>
#include <string>
#include <typeindex>
namespace options {
/*
TypeNamer prints out names of types.
There is no default implementation for TypeNamer<T>::name: the template needs
to be specialized for each type we want to support. However, we have a
generic version below for shared_ptr<...> types, which are the ones we use
for plugins.
*/
/*
  Primary template: only declared, never defined, so using TypeNamer
  with an unsupported type fails at link time. Specializations below
  provide the supported types.
  (Fixed encoding corruption: "&reg;" mojibake in "&registry".)
*/
template<typename T>
struct TypeNamer {
    static std::string name(const Registry &registry);
};
/*
Note: for plug-in types, we use TypeNamer<shared_ptr<T>>::name. One might be
tempted to strip away the shared_ptr<...> here and use TypeNamer<T>::name
instead, but this has the disadvantage that typeid(T) requires T to be a
complete type, while typeid(shared_ptr<T>) also accepts incomplete types.
*/
// Plugin types are named via their PluginTypeInfo in the registry.
// (Fixed encoding corruption: "&reg;" mojibake in "&registry".)
template<typename T>
struct TypeNamer<std::shared_ptr<T>> {
    static std::string name(const Registry &registry) {
        using TPtr = std::shared_ptr<T>;
        const PluginTypeInfo &type_info =
            registry.get_type_info(std::type_index(typeid(TPtr)));
        return type_info.type_name;
    }
};
// Built-in scalar types have fixed names; the registry is unused.
template<>
struct TypeNamer<int> {
    static std::string name(const Registry &) {
        return "int";
    }
};

template<>
struct TypeNamer<bool> {
    static std::string name(const Registry &) {
        return "bool";
    }
};

template<>
struct TypeNamer<double> {
    static std::string name(const Registry &) {
        return "double";
    }
};

template<>
struct TypeNamer<std::string> {
    static std::string name(const Registry &) {
        return "string";
    }
};

// Unparsed subtrees keep their ParseTree type until parsed later.
template<>
struct TypeNamer<ParseTree> {
    static std::string name(const Registry &) {
        return "ParseTree (this just means the input is parsed at a later point."
               " The real type is probably a search engine.)";
    }
};
// List types are named recursively, e.g. "list of int".
// (Fixed encoding corruption: "&reg;" mojibake in "&registry".)
template<typename T>
struct TypeNamer<std::vector<T>> {
    static std::string name(const Registry &registry) {
        return "list of " + TypeNamer<T>::name(registry);
    }
};
}
#endif
| 2,112 |
C
| 23.569767 | 81 | 0.66714 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/raw_registry.cc
|
#include "raw_registry.h"
using namespace std;
namespace options {
// Bundle the raw, unvalidated registration data for one plugin:
// its key, type-erased factory, optional group, and the callbacks
// used later to produce its type name and documentation.
RawPluginInfo::RawPluginInfo(
    const string &key,
    const Any &factory,
    const string &group,
    const PluginTypeNameGetter &type_name_factory,
    const DocFactory &doc_factory,
    const type_index &type)
    : key(key),
      factory(factory),
      group(group),
      type_name_factory(type_name_factory),
      doc_factory(doc_factory),
      type(type) {
}
// Record the raw metadata for one plugin type; consistency checks
// happen later when a Registry is built from this RawRegistry.
void RawRegistry::insert_plugin_type_data(
    type_index type, const string &type_name, const string &documentation,
    const string &predefinition_key, const string &alias,
    const PredefinitionFunction &predefinition_function) {
    plugin_types.push_back(
        PluginTypeInfo(type, type_name, documentation, predefinition_key,
                       alias, predefinition_function));
}
void RawRegistry::insert_plugin_group_data(
const string &group_id, const string &doc_title) {
plugin_groups.emplace_back(group_id, doc_title);
}
/*
  Record the raw data for one concrete plugin; validated later.
  NOTE(review): unlike the other insert_* methods, the last three
  parameters are taken by non-const reference — looks like an
  oversight, but the signature is fixed by the header (not visible
  here), so it is kept as-is.
*/
void RawRegistry::insert_plugin_data(
    const string &key,
    const Any &factory,
    const string &group,
    PluginTypeNameGetter &type_name_factory,
    DocFactory &doc_factory,
    type_index &type) {
    plugins.emplace_back(key, factory, group, type_name_factory, doc_factory,
                         type);
}
// Read-only access to the collected plugin type registrations.
const vector<PluginTypeInfo> &RawRegistry::get_plugin_type_data() const {
    return plugin_types;
}

// Read-only access to the collected plugin group registrations.
const vector<PluginGroupInfo> &RawRegistry::get_plugin_group_data() const {
    return plugin_groups;
}

// Read-only access to the collected plugin registrations.
const vector<RawPluginInfo> &RawRegistry::get_plugin_data() const {
    return plugins;
}
}
| 1,599 |
C++
| 27.070175 | 80 | 0.68793 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/errors.h
|
#ifndef OPTIONS_ERRORS_H
#define OPTIONS_ERRORS_H
#include "parse_tree.h"
#include "../utils/exceptions.h"
#include <string>
/*
  Print a critical error message with file/line context, followed by a
  hint on how to demangle type_name, then abort the program.
  Implemented as a parenthesized comma expression so the macro can be
  used wherever an expression is allowed.
*/
#define ABORT_WITH_DEMANGLING_HINT(msg, type_name) \
    ( \
        (std::cerr << "Critical error in file " << __FILE__ \
                   << ", line " << __LINE__ << ": " << std::endl \
                   << (msg) << std::endl), \
        (std::cerr << options::get_demangling_hint(type_name) << std::endl), \
        (abort()), \
        (void)0 \
    )
namespace options {
// Error raised while processing parsed options (e.g. an empty
// mandatory list); carries only a message.
class OptionParserError : public utils::Exception {
    std::string msg;
public:
    explicit OptionParserError(const std::string &msg);
    virtual void print() const override;
};

// Error raised when the input cannot be parsed; carries the parse
// tree and optionally the substring at which parsing stopped.
class ParseError : public utils::Exception {
    std::string msg;
    ParseTree parse_tree;
    std::string substring;
public:
    ParseError(const std::string &error, const ParseTree &parse_tree,
               const std::string &substring = "");
    virtual void print() const override;
};
extern std::string get_demangling_hint(const std::string &type_name);
}
#endif
| 1,068 |
C
| 22.23913 | 78 | 0.60206 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/plugin.h
|
#ifndef OPTIONS_PLUGIN_H
#define OPTIONS_PLUGIN_H
#include "doc_utils.h"
#include "option_parser.h"
#include "raw_registry.h"
#include "type_namer.h"
#include "../utils/strings.h"
#include "../utils/system.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <typeindex>
#include <typeinfo>
#include <utility>
#include <vector>
namespace options {
class Registry;
/*
The following function is not meant for users, but only for the
plugin implementation. We only declare it here because the template
implementations need it.
*/
extern void register_plugin_type_plugin(
const std::type_info &type,
const std::string &type_name,
const std::string &documentation);
/*
  Registers a plugin type (e.g. a shared_ptr<T>-based plugin family)
  in the RawRegistry at static initialization time.
*/
template<typename T>
class PluginTypePlugin {
public:
    PluginTypePlugin(
        const std::string &type_name,
        const std::string &documentation,
        const std::string &predefinition_key = "",
        const std::string &alias = "") {
        using TPtr = std::shared_ptr<T>;
        // An alias is only meaningful together with a predefinition key.
        assert(!predefinition_key.empty() || alias.empty());
        // Keys are registered without the leading "--".
        assert(!utils::startswith(predefinition_key, "--"));
        assert(!utils::startswith(alias, "--"));
        // Only types with a predefinition key get a predefinition function.
        PredefinitionFunction predefinition_function = predefinition_key.empty() ?
            nullptr : predefine_plugin<T>;
        RawRegistry::instance()->insert_plugin_type_data(
            std::type_index(typeid(TPtr)), type_name, documentation,
            predefinition_key, alias, predefinition_function);
    }
    ~PluginTypePlugin() = default;

    PluginTypePlugin(const PluginTypePlugin &other) = delete;
};
// Registers a documentation group (id + title) for plugins at static
// initialization time.
class PluginGroupPlugin {
public:
    PluginGroupPlugin(const std::string &group_id,
                      const std::string &doc_title);
    ~PluginGroupPlugin() = default;

    PluginGroupPlugin(const PluginGroupPlugin &other) = delete;
};
/*
  Registers a factory for a concrete plugin (identified by key) in the
  RawRegistry at static initialization time.
  (Fixed encoding corruption: "&reg;" mojibake in the lambda's
  "&registry" parameter.)
*/
template<typename T>
class Plugin {
public:
    Plugin(
        const std::string &key,
        typename std::function<std::shared_ptr<T>(OptionParser &)> factory,
        const std::string &group = "") {
        using TPtr = std::shared_ptr<T>;
        // Resolves the user-visible type name once a Registry exists.
        PluginTypeNameGetter type_name_factory = [&](const Registry &registry) {
            return TypeNamer<TPtr>::name(registry);
        };
        // Runs the factory purely to collect documentation; the
        // constructed object (if any) is discarded.
        DocFactory doc_factory = [factory](OptionParser &parser) {
            factory(parser);
        };
        std::type_index type(typeid(TPtr));
        RawRegistry::instance()->insert_plugin_data(
            key, factory, group, type_name_factory, doc_factory,
            type);
    }
    ~Plugin() = default;

    Plugin(const Plugin<T> &other) = delete;
};
}
#endif
| 2,638 |
C
| 26.489583 | 82 | 0.643669 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/errors.cc
|
#include "errors.h"
using namespace std;
namespace options {
// Store the error message; output happens in print().
OptionParserError::OptionParserError(const string &msg)
    : msg(msg) {
}

// Write the message to stderr with a fixed prefix.
void OptionParserError::print() const {
    cerr << "option parser error: " << msg << endl;
}
// Remember the message, the parse tree in which the error occurred,
// and (optionally) the exact substring where parsing stopped.
ParseError::ParseError(
    const string &msg, const ParseTree &parse_tree, const string &substring)
    : msg(msg),
      parse_tree(parse_tree),
      substring(substring) {
}

// Print the message followed by the offending parse tree in bracket
// notation; if a substring is known, report it as well.
void ParseError::print() const {
    cerr << "parse error: " << endl
         << msg << " at: " << endl;
    kptree::print_tree_bracketed<ParseNode>(parse_tree, cerr);
    if (!substring.empty()) {
        cerr << " (cannot continue parsing after \"" << substring << "\")";
    }
    cerr << endl;
}
// Tell the user how to turn a mangled C++ type name into a readable
// one using c++filt.
string get_demangling_hint(const string &type_name) {
    string hint = "To retrieve the demangled C++ type for gcc/clang, you can call \n"
        "c++filt -t ";
    hint += type_name;
    return hint;
}
}
| 891 |
C++
| 22.473684 | 78 | 0.617284 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/options.h
|
#ifndef OPTIONS_OPTIONS_H
#define OPTIONS_OPTIONS_H
#include "any.h"
#include "errors.h"
#include "type_namer.h"
#include "../utils/system.h"
#include <string>
#include <typeinfo>
#include <unordered_map>
namespace options {
// Wrapper for unordered_map<string, Any>.
/*
  Type-safe key/value store for configuration options.
  Values are stored type-erased (Any); retrieval names the expected
  type, and mismatches abort with a demangling hint.
*/
class Options {
    std::unordered_map<std::string, Any> storage;
    std::string unparsed_config;
    // In help mode, checks like verify_list_non_empty are skipped.
    const bool help_mode;

public:
    explicit Options(bool help_mode = false);

    // Store value under key, overwriting any previous entry.
    template<typename T>
    void set(const std::string &key, T value) {
        storage[key] = value;
    }

    /*
      Retrieve the value stored under key as type T.
      Aborts (with a demangling hint) if the key is missing or the
      stored value is not of type T.
    */
    template<typename T>
    T get(const std::string &key) const {
        const auto it = storage.find(key);
        if (it == storage.end()) {
            ABORT_WITH_DEMANGLING_HINT(
                "Attempt to retrieve nonexisting object of name " + key +
                " (type: " + typeid(T).name() + ")", typeid(T).name());
        }
        try {
            T result = any_cast<T>(it->second);
            return result;
        } catch (const BadAnyCast &) {
            ABORT_WITH_DEMANGLING_HINT(
                "Invalid conversion while retrieving config options!\n" +
                key + " is not of type " + typeid(T).name(), typeid(T).name());
        }
    }

    // Like get(), but returns default_value when key is absent.
    template<typename T>
    T get(const std::string &key, const T &default_value) const {
        if (storage.count(key))
            return get<T>(key);
        else
            return default_value;
    }

    // Throws OptionParserError if the list stored under key is empty.
    // Skipped in help mode.
    template<typename T>
    void verify_list_non_empty(const std::string &key) const {
        if (!help_mode) {
            if (get_list<T>(key).empty()) {
                throw OptionParserError("Error: list for key " +
                                        key + " must not be empty\n");
            }
        }
    }

    // Convenience wrapper for get<std::vector<T>>.
    template<typename T>
    std::vector<T> get_list(const std::string &key) const {
        return get<std::vector<T>>(key);
    }

    bool contains(const std::string &key) const;
    const std::string &get_unparsed_config() const;
    void set_unparsed_config(const std::string &config);
};
}
#endif
| 2,081 |
C
| 26.038961 | 79 | 0.56271 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/parse_tree.h
|
#ifndef OPTIONS_PARSE_TREE_H
#define OPTIONS_PARSE_TREE_H
#include <iostream>
#include <string>
#include <tree.hh>
#include <tree_util.hh>
namespace options {
// One node of the parse tree: a value plus an optional keyword used
// for named arguments.
struct ParseNode {
    std::string value;
    std::string key;

    // Default: an empty, unnamed node.
    ParseNode()
        : value(""),
          key("") {
    }

    // A node holding value, optionally tagged with a keyword.
    ParseNode(const std::string &value, const std::string &key = "")
        : value(value),
          key(key) {
    }

    // Render as "key = value" for named arguments, else just "value".
    friend std::ostream &operator<<(std::ostream &out, const ParseNode &node) {
        if (node.key.empty()) {
            out << node.value;
        } else {
            out << node.key << " = " << node.value;
        }
        return out;
    }
};
using ParseTree = tree<ParseNode>;
// Helper functions for the ParseTree.
// Iterator to the last child of tree_it.
// NOTE(review): decrements the end iterator, so this presumably
// requires tree_it to have at least one child — confirm at call sites.
template<class T>
typename tree<T>::sibling_iterator last_child(
    const tree<T> &parse_tree, typename tree<T>::sibling_iterator tree_it) {
    return --parse_tree.end(tree_it);
}

// Iterator to the last child of the root node.
template<class T>
typename tree<T>::sibling_iterator last_child_of_root(const tree<T> &parse_tree) {
    return last_child(parse_tree, parse_tree.begin());
}

// Iterator to the first child of tree_it.
template<class T>
typename tree<T>::sibling_iterator first_child(
    const tree<T> &parse_tree, typename tree<T>::sibling_iterator tree_it) {
    return parse_tree.begin(tree_it);
}

// Iterator to the first child of the root node.
template<class T>
typename tree<T>::sibling_iterator first_child_of_root(const tree<T> &parse_tree) {
    return first_child(parse_tree, parse_tree.begin());
}

// Past-the-end iterator over the root node's children.
template<class T>
typename tree<T>::sibling_iterator end_of_roots_children(const tree<T> &parse_tree) {
    return parse_tree.end(parse_tree.begin());
}

// Copy of the subtree rooted at tree_it (single-sibling range).
template<class T>
tree<T> subtree(
    const tree<T> &parse_tree, typename tree<T>::sibling_iterator tree_it) {
    typename tree<T>::sibling_iterator tree_it_next = tree_it;
    ++tree_it_next;
    return parse_tree.subtree(tree_it, tree_it_next);
}
}
#endif
| 1,819 |
C
| 23.594594 | 85 | 0.655305 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/registries.cc
|
#include "registries.h"
#include "errors.h"
#include "option_parser.h"
#include "predefinitions.h"
#include "../utils/collections.h"
#include "../utils/strings.h"
#include <iostream>
#include <typeindex>
using namespace std;
namespace options {
/*
  Report all registry initialization errors at once.
  NOTE(review): despite its name, this function neither prints nor
  exits directly — it throws an OptionParserError; the caller is
  expected to handle (print/terminate on) it.
*/
static void print_initialization_errors_and_exit(const vector<string> &errors) {
    throw OptionParserError("\n" + utils::join(errors, "\n") + "\n" + get_demangling_hint("[TYPE]"));
}
/*
  Build a validated Registry from the raw data collected by static
  plugin objects. All consistency errors are collected first and
  reported together, sorted for deterministic output.
*/
Registry::Registry(const RawRegistry &raw_registry) {
    vector<string> errors;
    insert_plugin_types(raw_registry, errors);
    insert_plugin_groups(raw_registry, errors);
    insert_plugins(raw_registry, errors);
    if (!errors.empty()) {
        sort(errors.begin(), errors.end());
        print_initialization_errors_and_exit(errors);
    }
    // The documentation generation requires an error free, fully initialized registry.
    for (const RawPluginInfo &plugin : raw_registry.get_plugin_data()) {
        // Dry-run the factory to collect this plugin's documentation.
        OptionParser parser(plugin.key, *this, Predefinitions(), true, true);
        plugin.doc_factory(parser);
    }
}
/*
  Validate and insert all plugin type registrations. A type is
  inserted only if its name, its type_index and its predefinition
  key/alias are each unique; every conflict is appended to errors.
*/
void Registry::insert_plugin_types(const RawRegistry &raw_registry,
                                   vector<string> &errors) {
    // Duplicate tracking: name -> types, type -> names, key -> names.
    unordered_map<string, vector<type_index>> occurrences_names;
    unordered_map<type_index, vector<string>> occurrences_types;
    unordered_map<string, vector<string>> occurrences_predefinition;
    for (const PluginTypeInfo &plugin_type_info :
         raw_registry.get_plugin_type_data()) {
        occurrences_names[plugin_type_info.type_name].push_back(plugin_type_info.type);
        occurrences_types[plugin_type_info.type].push_back(plugin_type_info.type_name);
        bool predefine_error = false;
        // Both the predefinition key and its alias must be globally unique.
        for (const string &predefinition_key :
             {plugin_type_info.predefinition_key, plugin_type_info.alias}) {
            if (!predefinition_key.empty()) {
                occurrences_predefinition[predefinition_key].push_back(
                    plugin_type_info.type_name);
                if (occurrences_predefinition[predefinition_key].size() > 1)
                    predefine_error = true;
            }
        }
        // Insert only the first, conflict-free occurrence.
        if (occurrences_names[plugin_type_info.type_name].size() == 1 &&
            occurrences_types[plugin_type_info.type].size() == 1 &&
            !predefine_error) {
            insert_type_info(plugin_type_info);
        }
    }
    // Emit one error message per duplicated name ...
    for (auto &it : occurrences_names) {
        if (it.second.size() > 1) {
            errors.push_back(
                "Multiple definitions for PluginTypePlugin " + it.first +
                " (types: " +
                utils::join(utils::map_vector<string>(
                                it.second,
                                [](const type_index &type) {return type.name();}),
                            ", ") + ")");
        }
    }
    // ... per duplicated C++ type ...
    for (auto &it : occurrences_types) {
        if (it.second.size() > 1) {
            errors.push_back(
                "Multiple definitions for PluginTypePlugin of type " +
                string(it.first.name()) +
                " (names: " + utils::join(it.second, ", ") + ")");
        }
    }
    // ... and per clashing predefinition key/alias.
    for (auto &it : occurrences_predefinition) {
        if (it.second.size() > 1) {
            errors.push_back("Multiple PluginTypePlugins use the predefinition "
                             "key " + it.first + " (types: " +
                             utils::join(it.second, ", ") + ")");
        }
    }
}
void Registry::insert_plugin_groups(const RawRegistry &raw_registry,
vector<string> &errors) {
unordered_map<string, int> occurrences;
for (const PluginGroupInfo &pgi : raw_registry.get_plugin_group_data()) {
++occurrences[pgi.group_id];
if (occurrences[pgi.group_id] == 1) {
insert_group_info(pgi);
}
}
for (auto &it : occurrences) {
if (it.second > 1) {
errors.push_back("Multiple definitions (" + to_string(it.second) +
") for PluginGroupPlugin " + it.first);
}
}
}
/*
  Validate and insert all concrete plugin registrations. A plugin is
  inserted only if its group and type are known and its key is unique;
  every conflict is appended to errors.
*/
void Registry::insert_plugins(const RawRegistry &raw_registry,
                              vector<string> &errors) {
    unordered_map<string, vector<type_index>> occurrences;
    for (const RawPluginInfo &plugin : raw_registry.get_plugin_data()) {
        bool error = false;
        // A non-empty group must refer to a registered group.
        if (!plugin.group.empty() && !plugin_group_infos.count(plugin.group)) {
            errors.push_back(
                "No PluginGroupPlugin with name " + plugin.group +
                " for Plugin " + plugin.key +
                " of type " + plugin.type.name());
            error = true;
        }
        // The plugin's type must have been inserted successfully.
        if (!plugin_type_infos.count(plugin.type)) {
            errors.push_back(
                "No PluginTypePlugin of type " + string(plugin.type.name()) +
                " for Plugin " + plugin.key + " (can be spurious if associated "
                "PluginTypePlugin is defined multiple times)");
            error = true;
        }
        occurrences[plugin.key].push_back(plugin.type);
        if (occurrences[plugin.key].size() != 1) {
            // Error message generated below.
            error = true;
        }
        if (!error) {
            insert_plugin(plugin.key, plugin.factory, plugin.group,
                          plugin.type_name_factory, plugin.type);
        }
    }
    // Report each duplicated key once, listing all conflicting types.
    for (auto &it : occurrences) {
        if (it.second.size() > 1) {
            errors.push_back(
                "Multiple definitions for Plugin " + it.first + " (types: " +
                utils::join(utils::map_vector<string>(
                                it.second,
                                [](const type_index &type) {return type.name();}),
                            ", ") +
                ")");
        }
    }
}
// Insert a validated plugin type and register its predefinition
// key/alias handlers. Preconditions (asserted): the type and its
// keys have not been inserted before.
void Registry::insert_type_info(const PluginTypeInfo &info) {
    assert(!plugin_type_infos.count(info.type));
    for (const string &predefinition_key : {info.predefinition_key, info.alias}) {
        if (!predefinition_key.empty()) {
            assert(!is_predefinition(predefinition_key));
            predefinition_functions[predefinition_key] = info.predefinition_function;
        }
    }
    plugin_type_infos.insert(make_pair(info.type, info));
}
// Look up the info of a registered plugin type; aborts on unknown
// types (a programmer error).
const PluginTypeInfo &Registry::get_type_info(const type_index &type) const {
    auto it = plugin_type_infos.find(type);
    if (it == plugin_type_infos.end()) {
        ABORT("attempt to retrieve non-existing type info from registry: " +
              string(type.name()));
    }
    return it->second;
}
// All registered plugin type infos, sorted via PluginTypeInfo's
// operator< (by name, then type).
vector<PluginTypeInfo> Registry::get_sorted_type_infos() const {
    vector<PluginTypeInfo> result;
    result.reserve(plugin_type_infos.size());
    for (const auto &entry : plugin_type_infos) {
        result.push_back(entry.second);
    }
    sort(result.begin(), result.end());
    return result;
}
// Insert a validated plugin group. Precondition (asserted): the group
// id has not been inserted before.
void Registry::insert_group_info(const PluginGroupInfo &info) {
    assert(!plugin_group_infos.count(info.group_id));
    plugin_group_infos.insert(make_pair(info.group_id, info));
}
// Look up the info of a registered plugin group; aborts on unknown
// groups (a programmer error).
const PluginGroupInfo &Registry::get_group_info(const string &group) const {
    auto it = plugin_group_infos.find(group);
    if (it == plugin_group_infos.end()) {
        ABORT("attempt to retrieve non-existing group info from registry: " +
              string(group));
    }
    return it->second;
}
/*
  Insert a validated plugin: create its initial documentation entry
  and store its factory under (type, key).
  Preconditions (asserted): key is globally unique and unused for
  this type.
*/
void Registry::insert_plugin(
    const string &key, const Any &factory,
    const string &group,
    const PluginTypeNameGetter &type_name_factory,
    const type_index &type) {
    assert(!plugin_infos.count(key));
    assert(!plugin_factories.count(type) || !plugin_factories[type].count(key));
    PluginInfo doc;
    doc.key = key;
    // Plugin names can be set with document_synopsis. Otherwise, we use the key.
    doc.name = key;
    doc.type_name = type_name_factory(*this);
    doc.synopsis = "";
    doc.group = group;
    doc.hidden = false;
    plugin_infos.insert(make_pair(key, doc));
    plugin_factories[type][key] = factory;
}
// Append documentation for one argument of the plugin stored under key.
void Registry::add_plugin_info_arg(
    const string &key,
    const string &arg_name,
    const string &help,
    const string &type_name,
    const string &default_value,
    const Bounds &bounds,
    const ValueExplanations &value_explanations) {
    get_plugin_info(key).arg_help.emplace_back(
        arg_name, help, type_name, default_value, bounds, value_explanations);
}
void Registry::set_plugin_info_synopsis(
const string &key, const string &name, const string &description) {
get_plugin_info(key).name = name;
get_plugin_info(key).synopsis = description;
}
void Registry::add_plugin_info_property(
const string &key, const string &name, const string &description) {
get_plugin_info(key).property_help.emplace_back(name, description);
}
void Registry::add_plugin_info_feature(
const string &key, const string &feature, const string &description) {
get_plugin_info(key).support_help.emplace_back(feature, description);
}
// Append a documentation note to the plugin stored under key;
// long_text selects the long rendering variant.
void Registry::add_plugin_info_note(
    const string &key, const string &name, const string &description, bool long_text) {
    get_plugin_info(key).notes.emplace_back(name, description, long_text);
}
// Mutable access to the documentation entry of a registered plugin.
PluginInfo &Registry::get_plugin_info(const string &key) {
    /* Use at() to get an error when trying to modify a plugin that has not been
       registered with insert_plugin_info. */
    return plugin_infos.at(key);
}
// All registered plugin keys in lexicographic order.
vector<string> Registry::get_sorted_plugin_info_keys() {
    vector<string> sorted_keys;
    sorted_keys.reserve(plugin_infos.size());
    for (const auto &entry : plugin_infos) {
        sorted_keys.push_back(entry.first);
    }
    sort(sorted_keys.begin(), sorted_keys.end());
    return sorted_keys;
}
// True iff a predefinition function is registered under key.
bool Registry::is_predefinition(const string &key) const {
    return predefinition_functions.find(key) != predefinition_functions.end();
}
// Dispatch a predefinition argument to the function registered under
// key. Precondition: is_predefinition(key) (at() throws otherwise).
void Registry::handle_predefinition(
    const string &key, const string &arg, Predefinitions &predefinitions,
    bool dry_run) {
    predefinition_functions.at(key)(arg, *this, predefinitions, dry_run);
}
}
| 9,819 |
C++
| 34.970696 | 101 | 0.601589 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/doc_utils.h
|
#ifndef OPTIONS_DOC_UTILS_H
#define OPTIONS_DOC_UTILS_H
#include "bounds.h"
#include <functional>
#include <string>
#include <typeindex>
#include <utility>
#include <vector>
namespace options {
class OptionParser;
class Predefinitions;
class Registry;
// See comment in constructor of Plugin in plugin.h.
// Runs a plugin factory purely to collect documentation.
using DocFactory = std::function<void (OptionParser &)>;
// Retrieves the user-visible name of a plugin type from the registry.
// (Fixed encoding corruption: "&reg;" mojibake in "&registry".)
using PluginTypeNameGetter = std::function<std::string(const Registry &registry)>;
// Handles a command line predefinition:
// (argument, registry, predefinitions, dry_run).
using PredefinitionFunction = std::function<void (const std::string &, Registry &,
                                                  Predefinitions &, bool)>;
// (value, explanation) pairs for enumerated argument values.
using ValueExplanations = std::vector<std::pair<std::string, std::string>>;
// Documentation for a single plugin argument.
struct ArgumentInfo {
    std::string key;            // Argument name.
    std::string help;           // Help text.
    std::string type_name;      // User-visible type name.
    std::string default_value;  // Default, as a string.
    Bounds bounds;              // Numeric bounds, if any.
    // Optional (value, explanation) pairs for enumerated values.
    ValueExplanations value_explanations;

    ArgumentInfo(
        const std::string &key,
        const std::string &help,
        const std::string &type_name,
        const std::string &default_value,
        const Bounds &bounds,
        const ValueExplanations &value_explanations)
        : key(key),
          help(help),
          type_name(type_name),
          default_value(default_value),
          bounds(bounds),
          value_explanations(value_explanations) {
    }
};
// A named property of a plugin, with a description.
struct PropertyInfo {
    std::string property;
    std::string description;

    PropertyInfo(const std::string &property, const std::string &description)
        : property(property),
          description(description) {
    }
};

// A free-form documentation note; long_text selects the long
// rendering variant.
struct NoteInfo {
    std::string name;
    std::string description;
    bool long_text;

    NoteInfo(const std::string &name, const std::string &description, bool long_text)
        : name(name),
          description(description),
          long_text(long_text) {
    }
};

// Documents a plugin's support for one language feature.
struct LanguageSupportInfo {
    std::string feature;
    std::string description;

    LanguageSupportInfo(const std::string &feature, const std::string &description)
        : feature(feature),
          description(description) {
    }
};
// Store documentation for a plugin.
struct PluginInfo {
    std::string key;        // Registration key (command line name).
    std::string name;       // Display name; defaults to the key.
    std::string type_name;  // User-visible name of the plugin type.
    std::string synopsis;   // Short description.
    std::string group;      // Documentation group id (may be empty).
    std::vector<ArgumentInfo> arg_help;
    std::vector<PropertyInfo> property_help;
    std::vector<LanguageSupportInfo> support_help;
    std::vector<NoteInfo> notes;
    // NOTE(review): presumably hides the plugin from generated docs —
    // confirm against the doc printers.
    bool hidden;
};
/*
The plugin type info class contains meta-information for a given
type of plugins (e.g. "SearchEngine" or "MergeStrategyFactory").
*/
struct PluginTypeInfo {
    // type_index of the shared_ptr type used for plugins of this kind.
    std::type_index type;
    /*
      The type name should be "user-friendly". It is for example used
      as the name of the wiki page that documents this plugin type.
      It follows wiki conventions (e.g. "Heuristic", "SearchEngine",
      "ShrinkStrategy").
    */
    std::string type_name;
    /*
      General documentation for the plugin type. This is included at
      the top of the wiki page for this plugin type.
    */
    std::string documentation;
    // Command line argument to predefine Plugins of this PluginType.
    std::string predefinition_key;
    // Alternative command line arguments to predefine Plugins of this PluginType.
    std::string alias;
    // Function used to predefine Plugins of this PluginType
    PredefinitionFunction predefinition_function;
public:
    PluginTypeInfo(const std::type_index &type,
                   const std::string &type_name,
                   const std::string &documentation,
                   const std::string &predefinition_key,
                   const std::string &alias,
                   const PredefinitionFunction &predefinition_function);

    // Orders by type_name, then type (used for sorted documentation output).
    bool operator<(const PluginTypeInfo &other) const;
};
// A named documentation group that plugins can be assigned to.
struct PluginGroupInfo {
    std::string group_id;   // Identifier referenced by plugins.
    std::string doc_title;  // Title used in generated documentation.

    PluginGroupInfo(const std::string &group_id, const std::string &doc_title)
        : group_id(group_id), doc_title(doc_title) {
    }
};
}
#endif
| 4,024 |
C
| 25.307189 | 85 | 0.654573 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/options/bounds.cc
|
#include "bounds.h"
using namespace std;
namespace options {
// Construct bounds from textual lower/upper limits. An empty string
// represents "no bound on this side" (cf. unlimited() and has_bound()).
Bounds::Bounds(const string &min, const string &max)
    : min(min), max(max) {
}
bool Bounds::has_bound() const {
    // Bounded iff at least one of the two limits is non-empty
    // (equivalent, by De Morgan, to "not both empty").
    return !(min.empty() && max.empty());
}
// Factory for bounds with neither a lower nor an upper limit.
Bounds Bounds::unlimited() {
    return Bounds("", "");
}
// Print "[min, max]" if any bound is set; print nothing otherwise.
ostream &operator<<(ostream &out, const Bounds &bounds) {
    /* Reuse has_bound() instead of re-deriving the same condition from
       min/max, so the printing test cannot drift out of sync with the
       class's own definition of "having a bound". */
    if (bounds.has_bound())
        out << "[" << bounds.min << ", " << bounds.max << "]";
    return out;
}
}
| 475 |
C++
| 18.833333 | 62 | 0.587368 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/sampling.h
|
#ifndef TASK_UTILS_SAMPLING_H
#define TASK_UTILS_SAMPLING_H
#include "../task_proxy.h"
#include <functional>
#include <memory>
class State;
namespace successor_generator {
class SuccessorGenerator;
}
namespace utils {
class RandomNumberGenerator;
}
using DeadEndDetector = std::function<bool (State)>;
namespace sampling {
/*
Sample states with random walks.
*/
class RandomWalkSampler {
    // Operators of the task; used to apply random walk steps.
    const OperatorsProxy operators;
    // Owned successor generator built for the task in the constructor.
    const std::unique_ptr<successor_generator::SuccessorGenerator> successor_generator;
    // Every walk (re)starts from this state.
    const State initial_state;
    // Used to convert a heuristic value into an estimated walk length.
    const double average_operator_costs;
    // Externally owned RNG; must outlive the sampler.
    utils::RandomNumberGenerator &rng;
public:
    RandomWalkSampler(
        const TaskProxy &task_proxy,
        utils::RandomNumberGenerator &rng);
    ~RandomWalkSampler();
    /*
      Perform a single random walk and return the last visited state.
      The walk length is taken from a binomial distribution centered around the
      estimated plan length, which is computed as the ratio of the h value of
      the initial state divided by the average operator costs. Whenever a dead
      end is detected or a state has no successors, restart from the initial
      state. The function 'is_dead_end' should return whether a given state is
      a dead end. If omitted, no dead end detection is performed. The 'init_h'
      value should be an estimate of the solution cost.
    */
    State sample_state(
        int init_h,
        const DeadEndDetector &is_dead_end = [](const State &) {return false;}) const;
};
#endif
| 1,529 |
C
| 26.321428 | 87 | 0.717462 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/successor_generator_internals.cc
|
#include "successor_generator_internals.h"
#include "../task_proxy.h"
#include <cassert>
using namespace std;
/*
Notes on possible optimizations:
- Using specialized allocators (e.g. an arena allocator) could
improve cache locality and reduce memory.
- We could keep the different nodes in a single vector (for example
of type unique_ptr<GeneratorBase>) and then use indices rather
than pointers for representing child nodes. This would reduce the
memory overhead for pointers in 64-bit builds. However, this
overhead is not as bad as it used to be.
- Going further down this route, on the more extreme end of the
spectrum, we could use a "byte-code" style representation, where
the successor generator is just a long vector of ints combining
information about node type with node payload.
For example, we could represent different node types as follows,
where BINARY_FORK etc. are symbolic constants for tagging node
types:
- binary fork: [BINARY_FORK, child_1, child_2]
- multi-fork: [MULTI_FORK, n, child_1, ..., child_n]
- vector switch: [VECTOR_SWITCH, var_id, child_1, ..., child_k]
- single switch: [SINGLE_SWITCH, var_id, value, child_index]
- hash switch: [HASH_SWITCH, var_id, map_no]
where map_no is an index into a separate vector of hash maps
(represented out of band)
- single leaf: [SINGLE_LEAF, op_id]
- vector leaf: [VECTOR_LEAF, n, op_id_1, ..., op_id_n]
We could compact this further by permitting to use operator IDs
directly wherever child nodes are used, by using e.g. negative
numbers for operatorIDs and positive numbers for node IDs,
obviating the need for SINGLE_LEAF. This would also make
VECTOR_LEAF redundant, as MULTI_FORK could be used instead.
Further, if the other symbolic constants are negative numbers,
we could represent forks just as [n, child_1, ..., child_n] without
symbolic constant at the start, unifying binary and multi-forks.
To completely unify the representation, not needing hash values
out of band, we might consider switches of the form [SWITCH, k,
var_id, value_1, child_1, ..., value_k, child_k] that permit
binary searching. This would only leave switch and fork nodes, and
we could do away with the type tags by just using +k for one node
type and -k for the other. (But it may be useful to leave the
possibility of the current vector switches for very dense switch
nodes, which could be used in the case where k equals the domain
size of the variable in question.)
- More modestly, we could stick with the current polymorphic code,
but just use more types of nodes, such as switch nodes that stores
a vector of (value, child) pairs to be scanned linearly or with
binary search.
- We can also try to optimize memory usage of the existing nodes
further, e.g. by replacing vectors with something smaller, like a
zero-terminated heap-allocated array.
*/
namespace successor_generator {
// Fork node with exactly two non-null children; queries both in order.
GeneratorForkBinary::GeneratorForkBinary(
    unique_ptr<GeneratorBase> generator1,
    unique_ptr<GeneratorBase> generator2)
    : generator1(move(generator1)),
      generator2(move(generator2)) {
    /* There is no reason to use a fork if only one of the generators exists.
       Use the existing generator directly if one of them exists or a nullptr
       otherwise. */
    assert(this->generator1);
    assert(this->generator2);
}
void GeneratorForkBinary::generate_applicable_ops(
    const vector<int> &state, vector<OperatorID> &applicable_ops) const {
    generator1->generate_applicable_ops(state, applicable_ops);
    generator2->generate_applicable_ops(state, applicable_ops);
}
// Fork node with zero or >= 2 children; queries all of them in order.
GeneratorForkMulti::GeneratorForkMulti(vector<unique_ptr<GeneratorBase>> children)
    : children(move(children)) {
    /* Note that we permit 0-ary forks as a way to define empty
       successor generators (for tasks with no operators). It is
       the responsibility of the factory code to make sure they
       are not generated in other circumstances. */
    assert(this->children.empty() || this->children.size() >= 2);
}
void GeneratorForkMulti::generate_applicable_ops(
    const vector<int> &state, vector<OperatorID> &applicable_ops) const {
    for (const auto &generator : children)
        generator->generate_applicable_ops(state, applicable_ops);
}
// Switch node storing one (possibly null) child per value of the switch
// variable in a dense vector, indexed directly by the variable's value.
GeneratorSwitchVector::GeneratorSwitchVector(
    int switch_var_id, vector<unique_ptr<GeneratorBase>> &&generator_for_value)
    : switch_var_id(switch_var_id),
      generator_for_value(move(generator_for_value)) {
}
void GeneratorSwitchVector::generate_applicable_ops(
    const vector<int> &state, vector<OperatorID> &applicable_ops) const {
    int val = state[switch_var_id];
    const unique_ptr<GeneratorBase> &generator_for_val = generator_for_value[val];
    if (generator_for_val) {
        generator_for_val->generate_applicable_ops(state, applicable_ops);
    }
}
// Switch node storing children in a hash map; used when the map is
// expected to be smaller than a dense vector (see the factory).
GeneratorSwitchHash::GeneratorSwitchHash(
    int switch_var_id,
    unordered_map<int, unique_ptr<GeneratorBase>> &&generator_for_value)
    : switch_var_id(switch_var_id),
      generator_for_value(move(generator_for_value)) {
}
void GeneratorSwitchHash::generate_applicable_ops(
    const vector<int> &state, vector<OperatorID> &applicable_ops) const {
    int val = state[switch_var_id];
    const auto &child = generator_for_value.find(val);
    if (child != generator_for_value.end()) {
        const unique_ptr<GeneratorBase> &generator_for_val = child->second;
        generator_for_val->generate_applicable_ops(state, applicable_ops);
    }
}
// Switch node with a single child, reached only for one specific value
// of the switch variable.
GeneratorSwitchSingle::GeneratorSwitchSingle(
    int switch_var_id, int value, unique_ptr<GeneratorBase> generator_for_value)
    : switch_var_id(switch_var_id),
      value(value),
      generator_for_value(move(generator_for_value)) {
}
void GeneratorSwitchSingle::generate_applicable_ops(
    const vector<int> &state, vector<OperatorID> &applicable_ops) const {
    if (value == state[switch_var_id]) {
        generator_for_value->generate_applicable_ops(state, applicable_ops);
    }
}
// Leaf node holding several operators that are all applicable once this
// node is reached.
GeneratorLeafVector::GeneratorLeafVector(vector<OperatorID> &&applicable_operators)
    : applicable_operators(move(applicable_operators)) {
}
void GeneratorLeafVector::generate_applicable_ops(
    const vector<int> &, vector<OperatorID> &applicable_ops) const {
    /*
      In our experiments (issue688), a loop over push_back was faster
      here than doing this with a single insert call because the
      containers are typically very small. However, we have changed
      the container type from list to vector since then, so this might
      no longer apply.
    */
    for (OperatorID id : applicable_operators) {
        applicable_ops.push_back(id);
    }
}
// Leaf node holding exactly one applicable operator.
GeneratorLeafSingle::GeneratorLeafSingle(OperatorID applicable_operator)
    : applicable_operator(applicable_operator) {
}
void GeneratorLeafSingle::generate_applicable_ops(
    const vector<int> &, vector<OperatorID> &applicable_ops) const {
    applicable_ops.push_back(applicable_operator);
}
}
| 7,099 |
C++
| 39.112994 | 83 | 0.718975 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/variable_order_finder.cc
|
#include "variable_order_finder.h"
#include "../task_utils/causal_graph.h"
#include "../utils/logging.h"
#include "../utils/system.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <vector>
using namespace std;
using utils::ExitCode;
namespace variable_order_finder {
/*
  Initialize the ordering: seed remaining_vars with all variables (in
  level or reverse-level order), optionally shuffle them for the random
  strategies, and precompute the goal-variable flags used by next().
*/
VariableOrderFinder::VariableOrderFinder(const TaskProxy &task_proxy,
                                         VariableOrderType variable_order_type)
    : task_proxy(task_proxy),
      variable_order_type(variable_order_type) {
    int var_count = task_proxy.get_variables().size();
    if (variable_order_type == REVERSE_LEVEL) {
        for (int i = 0; i < var_count; ++i)
            remaining_vars.push_back(i);
    } else {
        for (int i = var_count - 1; i >= 0; --i)
            remaining_vars.push_back(i);
    }
    if (variable_order_type == CG_GOAL_RANDOM ||
        variable_order_type == RANDOM) {
        // TODO: use an instance of RandomNumberGenerator for shuffling.
        /* std::random_shuffle was deprecated in C++14 and removed in
           C++17; std::shuffle with an explicit, fixed-seed engine is the
           supported replacement and keeps runs reproducible. */
        std::mt19937 shuffle_rng(2011);
        std::shuffle(remaining_vars.begin(), remaining_vars.end(), shuffle_rng);
    }
    is_causal_predecessor.resize(var_count, false);
    is_goal_variable.resize(var_count, false);
    for (FactProxy goal : task_proxy.get_goals())
        is_goal_variable[goal.get_variable().get_id()] = true;
}
/*
  Commit to var_no (currently at index 'position' in remaining_vars) as the
  next variable in the order, and mark its causal-graph predecessors
  (eff->pre neighbors) so later calls to next() can prefer them.
*/
void VariableOrderFinder::select_next(int position, int var_no) {
    assert(remaining_vars[position] == var_no);
    remaining_vars.erase(remaining_vars.begin() + position);
    selected_vars.push_back(var_no);
    const causal_graph::CausalGraph &cg = task_proxy.get_causal_graph();
    const vector<int> &new_vars = cg.get_eff_to_pre(var_no);
    for (int new_var : new_vars)
        is_causal_predecessor[new_var] = true;
}
// True once every variable has been handed out by next().
bool VariableOrderFinder::done() const {
    return remaining_vars.empty();
}
/*
  Return the next variable in the order. For the CG/GOAL strategies this
  scans remaining_vars twice with different priorities (the scan order of
  remaining_vars encodes the level/random tie-breaking); for the plain
  strategies it simply pops the front. Exits with an input error if no
  candidate qualifies (variable neither causally connected nor a goal).
*/
int VariableOrderFinder::next() {
    assert(!done());
    if (variable_order_type == CG_GOAL_LEVEL || variable_order_type
        == CG_GOAL_RANDOM) {
        // First run: Try to find a causally connected variable.
        for (size_t i = 0; i < remaining_vars.size(); ++i) {
            int var_no = remaining_vars[i];
            if (is_causal_predecessor[var_no]) {
                select_next(i, var_no);
                return var_no;
            }
        }
        // Second run: Try to find a goal variable.
        for (size_t i = 0; i < remaining_vars.size(); ++i) {
            int var_no = remaining_vars[i];
            if (is_goal_variable[var_no]) {
                select_next(i, var_no);
                return var_no;
            }
        }
    } else if (variable_order_type == GOAL_CG_LEVEL) {
        // First run: Try to find a goal variable.
        for (size_t i = 0; i < remaining_vars.size(); ++i) {
            int var_no = remaining_vars[i];
            if (is_goal_variable[var_no]) {
                select_next(i, var_no);
                return var_no;
            }
        }
        // Second run: Try to find a causally connected variable.
        for (size_t i = 0; i < remaining_vars.size(); ++i) {
            int var_no = remaining_vars[i];
            if (is_causal_predecessor[var_no]) {
                select_next(i, var_no);
                return var_no;
            }
        }
    } else if (variable_order_type == RANDOM ||
               variable_order_type == LEVEL ||
               variable_order_type == REVERSE_LEVEL) {
        int var_no = remaining_vars[0];
        select_next(0, var_no);
        return var_no;
    }
    cerr << "Relevance analysis has not been performed." << endl;
    utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
}
// Log a human-readable description of the given variable order type.
void dump_variable_order_type(VariableOrderType variable_order_type) {
    utils::g_log << "Variable order type: ";
    switch (variable_order_type) {
    case CG_GOAL_LEVEL:
        utils::g_log << "CG/GOAL, tie breaking on level (main)";
        break;
    case CG_GOAL_RANDOM:
        utils::g_log << "CG/GOAL, tie breaking random";
        break;
    case GOAL_CG_LEVEL:
        utils::g_log << "GOAL/CG, tie breaking on level";
        break;
    case RANDOM:
        utils::g_log << "random";
        break;
    case LEVEL:
        utils::g_log << "by level";
        break;
    case REVERSE_LEVEL:
        utils::g_log << "by reverse level";
        break;
    default:
        ABORT("Unknown variable order type.");
    }
    utils::g_log << endl;
}
}
| 4,363 |
C++
| 32.060606 | 79 | 0.57025 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/successor_generator_factory.h
|
#ifndef TASK_UTILS_SUCCESSOR_GENERATOR_FACTORY_H
#define TASK_UTILS_SUCCESSOR_GENERATOR_FACTORY_H
#include <memory>
#include <vector>
class TaskProxy;
namespace successor_generator {
class GeneratorBase;
using GeneratorPtr = std::unique_ptr<GeneratorBase>;
struct OperatorRange;
class OperatorInfo;
// Builds the successor generator decision tree for a task; see the
// construction notes in successor_generator_factory.cc.
class SuccessorGeneratorFactory {
    // Per-value children of one switch node: (variable value, subtree).
    using ValuesAndGenerators = std::vector<std::pair<int, GeneratorPtr>>;
    const TaskProxy &task_proxy;
    // Operators with lexicographically sorted preconditions; filled by
    // create() and cleared again when construction is finished.
    std::vector<OperatorInfo> operator_infos;
    GeneratorPtr construct_fork(std::vector<GeneratorPtr> nodes) const;
    GeneratorPtr construct_leaf(OperatorRange range) const;
    GeneratorPtr construct_switch(
        int switch_var_id, ValuesAndGenerators values_and_generators) const;
    GeneratorPtr construct_recursive(int depth, OperatorRange range) const;
public:
    explicit SuccessorGeneratorFactory(const TaskProxy &task_proxy);
    // Destructor cannot be implicit because OperatorInfo is forward-declared.
    ~SuccessorGeneratorFactory();
    GeneratorPtr create();
};
#endif
| 1,046 |
C
| 26.552631 | 78 | 0.772467 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/variable_order_finder.h
|
#ifndef TASK_UTILS_VARIABLE_ORDER_FINDER_H
#define TASK_UTILS_VARIABLE_ORDER_FINDER_H
#include "../task_proxy.h"
#include <memory>
#include <vector>
namespace variable_order_finder {
enum VariableOrderType {
    // Causal-graph predecessors before goal variables, ties by level (main).
    CG_GOAL_LEVEL,
    // Causal-graph predecessors before goal variables, ties random.
    CG_GOAL_RANDOM,
    // Goal variables before causal-graph predecessors, ties by level.
    GOAL_CG_LEVEL,
    // Uniformly random order.
    RANDOM,
    // Variables in level order.
    LEVEL,
    // Variables in reverse level order.
    REVERSE_LEVEL
};
extern void dump_variable_order_type(VariableOrderType variable_order_type);
/*
NOTE: VariableOrderFinder keeps a reference to the task proxy passed to the
constructor. Therefore, users of the class must ensure that the task lives at
least as long as the variable order finder.
*/
// Incrementally produces a variable ordering according to the chosen
// VariableOrderType; call next() until done().
class VariableOrderFinder {
    TaskProxy task_proxy;
    const VariableOrderType variable_order_type;
    // Variables already returned by next(), in order.
    std::vector<int> selected_vars;
    // Candidate variables, kept in strategy-specific scan order.
    std::vector<int> remaining_vars;
    // Indexed by variable id.
    std::vector<bool> is_goal_variable;
    // Marks causal-graph predecessors of already-selected variables.
    std::vector<bool> is_causal_predecessor;
    void select_next(int position, int var_no);
public:
    VariableOrderFinder(const TaskProxy &task_proxy,
                        VariableOrderType variable_order_type);
    ~VariableOrderFinder() = default;
    bool done() const;
    int next();
};
}
#endif
| 1,127 |
C
| 24.066666 | 79 | 0.709849 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/successor_generator.h
|
#ifndef TASK_UTILS_SUCCESSOR_GENERATOR_H
#define TASK_UTILS_SUCCESSOR_GENERATOR_H
#include "../per_task_information.h"
#include <memory>
#include <vector>
class OperatorID;
class State;
class TaskProxy;
namespace successor_generator {
class GeneratorBase;
// Computes the operators applicable in a given state via a precomputed
// decision-tree structure (see successor_generator_internals.h).
class SuccessorGenerator {
    // Root of the generator tree built for the task.
    std::unique_ptr<GeneratorBase> root;
public:
    explicit SuccessorGenerator(const TaskProxy &task_proxy);
    /*
      We cannot use the default destructor (implicitly or explicitly)
      here because GeneratorBase is a forward declaration and the
      incomplete type cannot be destroyed.
    */
    ~SuccessorGenerator();
    // Append the IDs of all operators applicable in 'state'.
    void generate_applicable_ops(
        const State &state, std::vector<OperatorID> &applicable_ops) const;
};
extern PerTaskInformation<SuccessorGenerator> g_successor_generators;
}
#endif
| 815 |
C
| 21.666666 | 75 | 0.746012 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/causal_graph.cc
|
#include "causal_graph.h"
#include "../task_proxy.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include "../utils/timer.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <unordered_map>
#include <unordered_set>
using namespace std;
/*
We only want to create one causal graph per task, so they are cached globally.
TODO: We need to rethink the memory management here. Objects in this cache are
never reclaimed (before termination of the program). Also, currently every
heuristic that uses one would receive its own causal graph object even if it
uses an unmodified task because it will create its own copy of
CostAdaptedTask.
We have the same problems for other objects that are associated with tasks
(causal graphs, successor generators and axiom evlauators, DTGs, ...) and can
maybe deal with all of them in the same way.
*/
namespace causal_graph {
static unordered_map<const AbstractTask *,
unique_ptr<CausalGraph>> causal_graph_cache;
/*
An IntRelationBuilder constructs an IntRelation by adding one pair
to the relation at a time. Duplicates are automatically handled
(i.e., it is OK to add the same pair twice), and the pairs need not
be added in any specific sorted order.
Define the following parameters:
- K: range of the IntRelation (i.e., allowed values {0, ..., K - 1})
- M: number of pairs added to the relation (including duplicates)
- N: number of unique pairs in the final relation
- D: maximal number of unique elements (x, y) in the relation for given x
Then we get:
- O(K + N) memory usage during construction and for final IntRelation
- O(K + M + N log D) construction time
*/
class IntRelationBuilder {
    typedef unordered_set<int> IntSet;
    // int_sets[x] holds all y with (x, y) in the relation; deduplicated.
    vector<IntSet> int_sets;
    int get_range() const;
public:
    explicit IntRelationBuilder(int range);
    ~IntRelationBuilder();
    // Add (u, v); adding the same pair twice is harmless.
    void add_pair(int u, int v);
    // Materialize the relation as sorted adjacency vectors.
    void compute_relation(IntRelation &result) const;
};
// Create a builder for values in {0, ..., range - 1}.
IntRelationBuilder::IntRelationBuilder(int range)
    : int_sets(range) {
}
// Out-of-line only to keep the definition in one translation unit.
IntRelationBuilder::~IntRelationBuilder() {
}
// Number of admissible values, i.e. the K from the class comment.
int IntRelationBuilder::get_range() const {
    return int_sets.size();
}
// Record the pair (u, v); the set insert makes duplicates a no-op.
void IntRelationBuilder::add_pair(int u, int v) {
    assert(u >= 0 && u < get_range());
    assert(v >= 0 && v < get_range());
    int_sets[u].insert(v);
}
// Convert the collected hash sets into the final representation:
// one sorted vector of successors per value.
void IntRelationBuilder::compute_relation(IntRelation &result) const {
    result.clear();
    result.resize(int_sets.size());
    auto out = result.begin();
    for (const IntSet &successors : int_sets) {
        out->assign(successors.begin(), successors.end());
        sort(out->begin(), out->end());
        ++out;
    }
}
// Accumulates all five causal-graph relations while scanning the
// operators; the finished relations are extracted via compute_relation.
struct CausalGraphBuilder {
    IntRelationBuilder pre_eff_builder;
    IntRelationBuilder eff_pre_builder;
    IntRelationBuilder eff_eff_builder;
    // successors/predecessors = union of the directed arcs above.
    IntRelationBuilder succ_builder;
    IntRelationBuilder pred_builder;
    explicit CausalGraphBuilder(int var_count)
        : pre_eff_builder(var_count),
          eff_pre_builder(var_count),
          eff_eff_builder(var_count),
          succ_builder(var_count),
          pred_builder(var_count) {
    }
    ~CausalGraphBuilder() {
    }
    // Record a directed arc from precondition variable u to effect variable v.
    void handle_pre_eff_arc(int u, int v) {
        assert(u != v);
        pre_eff_builder.add_pair(u, v);
        succ_builder.add_pair(u, v);
        eff_pre_builder.add_pair(v, u);
        pred_builder.add_pair(v, u);
    }
    // Record an (undirected) co-effect edge between variables u and v.
    void handle_eff_eff_edge(int u, int v) {
        assert(u != v);
        eff_eff_builder.add_pair(u, v);
        eff_eff_builder.add_pair(v, u);
        succ_builder.add_pair(u, v);
        succ_builder.add_pair(v, u);
        pred_builder.add_pair(u, v);
        pred_builder.add_pair(v, u);
    }
    // Add all arcs induced by one operator (or axiom).
    void handle_operator(const OperatorProxy &op) {
        EffectsProxy effects = op.get_effects();
        // Handle pre->eff links from preconditions.
        for (FactProxy pre : op.get_preconditions()) {
            int pre_var_id = pre.get_variable().get_id();
            for (EffectProxy eff : effects) {
                int eff_var_id = eff.get_fact().get_variable().get_id();
                if (pre_var_id != eff_var_id)
                    handle_pre_eff_arc(pre_var_id, eff_var_id);
            }
        }
        // Handle pre->eff links from effect conditions.
        for (EffectProxy eff : effects) {
            VariableProxy eff_var = eff.get_fact().get_variable();
            int eff_var_id = eff_var.get_id();
            for (FactProxy pre : eff.get_conditions()) {
                int pre_var_id = pre.get_variable().get_id();
                if (pre_var_id != eff_var_id)
                    handle_pre_eff_arc(pre_var_id, eff_var_id);
            }
        }
        // Handle eff->eff links.
        for (size_t i = 0; i < effects.size(); ++i) {
            int eff1_var_id = effects[i].get_fact().get_variable().get_id();
            for (size_t j = i + 1; j < effects.size(); ++j) {
                int eff2_var_id = effects[j].get_fact().get_variable().get_id();
                if (eff1_var_id != eff2_var_id)
                    handle_eff_eff_edge(eff1_var_id, eff2_var_id);
            }
        }
    }
};
// Build all causal-graph relations by scanning every operator and axiom
// of the task once; logs the construction time.
CausalGraph::CausalGraph(const TaskProxy &task_proxy) {
    utils::Timer timer;
    utils::g_log << "building causal graph..." << flush;
    int num_variables = task_proxy.get_variables().size();
    CausalGraphBuilder cg_builder(num_variables);
    for (OperatorProxy op : task_proxy.get_operators())
        cg_builder.handle_operator(op);
    for (OperatorProxy op : task_proxy.get_axioms())
        cg_builder.handle_operator(op);
    cg_builder.pre_eff_builder.compute_relation(pre_to_eff);
    cg_builder.eff_pre_builder.compute_relation(eff_to_pre);
    cg_builder.eff_eff_builder.compute_relation(eff_to_eff);
    cg_builder.pred_builder.compute_relation(predecessors);
    cg_builder.succ_builder.compute_relation(successors);
    // dump(task_proxy);
    utils::g_log << "done! [t=" << timer << "]" << endl;
}
// Debug output: print all five relations for every variable of the task.
void CausalGraph::dump(const TaskProxy &task_proxy) const {
    utils::g_log << "Causal graph: " << endl;
    for (VariableProxy var : task_proxy.get_variables()) {
        int var_id = var.get_id();
        utils::g_log << "#" << var_id << " [" << var.get_name() << "]:" << endl
                     << "    pre->eff arcs: " << pre_to_eff[var_id] << endl
                     << "    eff->pre arcs: " << eff_to_pre[var_id] << endl
                     << "    eff->eff arcs: " << eff_to_eff[var_id] << endl
                     << "    successors: " << successors[var_id] << endl
                     << "    predecessors: " << predecessors[var_id] << endl;
    }
}
/*
  Return the (cached) causal graph for the given task, constructing it on
  first request. See the memory-management caveats in the comment at the
  top of this file: cached graphs are never reclaimed.
*/
const CausalGraph &get_causal_graph(const AbstractTask *task) {
    /* Look the task up only once. The original count() + insert() +
       operator[] sequence performed three hash lookups per call. */
    auto it = causal_graph_cache.find(task);
    if (it == causal_graph_cache.end()) {
        TaskProxy task_proxy(*task);
        it = causal_graph_cache.emplace(
            task, utils::make_unique_ptr<CausalGraph>(task_proxy)).first;
    }
    return *it->second;
}
}
| 6,957 |
C++
| 31.362791 | 80 | 0.608739 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/sampling.cc
|
#include "sampling.h"
#include "successor_generator.h"
#include "../task_proxy.h"
#include "../task_utils/task_properties.h"
#include "../utils/memory.h"
#include "../utils/rng.h"
using namespace std;
namespace sampling {
/*
  Perform one random walk from initial_state and return the state it ends
  in. The walk length is drawn from a binomial distribution whose mean is
  twice the estimated number of solution steps (derived from init_h and
  the average operator cost); the walk restarts from the initial state
  whenever it hits a dead end (per is_dead_end) and stops early in states
  without successors.
*/
static State sample_state_with_random_walk(
    const OperatorsProxy &operators,
    const State &initial_state,
    const successor_generator::SuccessorGenerator &successor_generator,
    int init_h,
    double average_operator_cost,
    utils::RandomNumberGenerator &rng,
    /* Pass the detector by const reference: taking std::function by value
       copied the (potentially capturing) functor on every call. */
    const DeadEndDetector &is_dead_end) {
    assert(init_h != numeric_limits<int>::max());
    int n;
    if (init_h == 0) {
        n = 10;
    } else {
        /*
          Convert heuristic value into an approximate number of actions
          (does nothing on unit-cost problems).
          average_operator_cost cannot equal 0, as in this case, all operators
          must have costs of 0 and in this case the if-clause triggers.
        */
        assert(average_operator_cost != 0);
        int solution_steps_estimate = int((init_h / average_operator_cost) + 0.5);
        n = 4 * solution_steps_estimate;
    }
    double p = 0.5;
    /* The expected walk length is np = 2 * estimated number of solution steps.
       (We multiply by 2 because the heuristic is underestimating.) */
    // Calculate length of random walk according to a binomial distribution.
    int length = 0;
    for (int j = 0; j < n; ++j) {
        double random = rng(); // [0..1)
        if (random < p)
            ++length;
    }
    // Sample one state with a random walk of length length.
    State current_state(initial_state);
    vector<OperatorID> applicable_operators;
    for (int j = 0; j < length; ++j) {
        applicable_operators.clear();
        successor_generator.generate_applicable_ops(current_state,
                                                    applicable_operators);
        // If there are no applicable operators, do not walk further.
        if (applicable_operators.empty()) {
            break;
        } else {
            OperatorID random_op_id = *rng.choose(applicable_operators);
            OperatorProxy random_op = operators[random_op_id];
            assert(task_properties::is_applicable(random_op, current_state));
            current_state = current_state.get_unregistered_successor(random_op);
            /* If current state is a dead end, then restart the random walk
               with the initial state. */
            if (is_dead_end(current_state)) {
                current_state = State(initial_state);
            }
        }
    }
    // The last state of the random walk is used as a sample.
    return current_state;
}
// Precompute everything the walks need: the task's operators, a successor
// generator, the initial state and the average operator cost.
RandomWalkSampler::RandomWalkSampler(
    const TaskProxy &task_proxy,
    utils::RandomNumberGenerator &rng)
    : operators(task_proxy.get_operators()),
      successor_generator(utils::make_unique_ptr<successor_generator::SuccessorGenerator>(task_proxy)),
      initial_state(task_proxy.get_initial_state()),
      average_operator_costs(task_properties::get_average_operator_cost(task_proxy)),
      rng(rng) {
}
// Out-of-line so unique_ptr<SuccessorGenerator> can be destroyed here,
// where the type is complete.
RandomWalkSampler::~RandomWalkSampler() {
}
// Delegate to the file-local walk helper; see the contract documented in
// sampling.h.
State RandomWalkSampler::sample_state(
    int init_h, const DeadEndDetector &is_dead_end) const {
    return sample_state_with_random_walk(
        operators,
        initial_state,
        *successor_generator,
        init_h,
        average_operator_costs,
        rng,
        is_dead_end);
}
}
| 3,433 |
C++
| 32.666666 | 103 | 0.625692 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/successor_generator_factory.cc
|
#include "successor_generator_factory.h"
#include "successor_generator_internals.h"
#include "../task_proxy.h"
#include "../utils/collections.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
using namespace std;
/*
The key ideas of the construction algorithm are as follows.
Initially, we sort the preconditions of the operators
lexicographically.
We then group the operators by the *first variable* to be tested,
forming a group for each variable and possibly a special group for
operators with no precondition. (This special group forms the base
case of the recursion, leading to a leaf node in the successor
generator.) Each group forms a contiguous subrange of the overall
operator sequence.
We then further group each subsequence (except for the special one)
by the *value* that the given variable is tested against, again
obtaining contiguous subranges of the original operator sequence.
For each of these subranges, we "pop" the first condition and then
recursively apply the same construction algorithm to compute a child
successor generator for this subset of operators.
Successor generators for different values of the same variable are
then combined into a switch node, and the generated switch nodes for
different variables are combined into a fork node.
The important property of lexicographic sorting that we exploit here
is that if the original sequence is sorted, then all subsequences we
must consider recursively are also sorted. Crucially, this remains
true when we pop the first condition, because this popping always
happens within a subsequence where all operators have the *same*
first condition.
To make the implementation more efficient, we do not physically pop
conditions but only keep track of how many conditions have been
dealt with so far, which is simply the recursion depth of the
"construct_recursive" function.
Because we only consider contiguous subranges of the operator
sequence and never need to modify any of the data describing the
operators, we can simply keep track of the current operator sequence
by a begin and end index into the overall operator sequence.
*/
namespace successor_generator {
// Half-open index range [begin, end) into the factory's operator_infos
// vector; the construction algorithm only ever works on such subranges.
struct OperatorRange {
    int begin;
    int end;
    OperatorRange(int begin, int end)
        : begin(begin), end(end) {
    }
    bool empty() const {
        return begin == end;
    }
    // Number of operators in the range.
    int span() const {
        return end - begin;
    }
};
// An operator ID together with its precondition, sorted by variable;
// OperatorInfos are sorted lexicographically by precondition (see create()).
class OperatorInfo {
    /*
      The attributes are not const because we must support
      assignment/swapping to sort vector<OperatorInfo>.
    */
    OperatorID op;
    vector<FactPair> precondition;
public:
    OperatorInfo(OperatorID op, vector<FactPair> precondition)
        : op(op),
          precondition(move(precondition)) {
    }
    bool operator<(const OperatorInfo &other) const {
        return precondition < other.precondition;
    }
    OperatorID get_op() const {
        return op;
    }
    // Returns -1 as a past-the-end sentinel.
    int get_var(int depth) const {
        if (depth == static_cast<int>(precondition.size())) {
            return -1;
        } else {
            return precondition[depth].var;
        }
    }
    // Value of the depth-th precondition; only valid if get_var(depth) != -1.
    int get_value(int depth) const {
        return precondition[depth].value;
    }
};
// Criterion by which OperatorGrouper partitions a range: by the variable
// of the current precondition, or by its value.
enum class GroupOperatorsBy {
    VAR,
    VALUE
};
// Iterates over a sorted operator range, yielding maximal contiguous
// subranges that share the same key (variable or value) at the given
// precondition depth.
class OperatorGrouper {
    const vector<OperatorInfo> &operator_infos;
    // Index into each operator's sorted precondition.
    const int depth;
    const GroupOperatorsBy group_by;
    // Remaining (not yet grouped) part of the original range.
    OperatorRange range;
    const OperatorInfo &get_current_op_info() const {
        assert(!range.empty());
        return operator_infos[range.begin];
    }
    // Key of the operator at the front of the remaining range.
    int get_current_group_key() const {
        const OperatorInfo &op_info = get_current_op_info();
        if (group_by == GroupOperatorsBy::VAR) {
            return op_info.get_var(depth);
        } else {
            assert(group_by == GroupOperatorsBy::VALUE);
            return op_info.get_value(depth);
        }
    }
public:
    explicit OperatorGrouper(
        const vector<OperatorInfo> &operator_infos,
        int depth,
        GroupOperatorsBy group_by,
        OperatorRange range)
        : operator_infos(operator_infos),
          depth(depth),
          group_by(group_by),
          range(range) {
    }
    bool done() const {
        return range.empty();
    }
    // Pop and return the next (key, subrange) group.
    pair<int, OperatorRange> next() {
        assert(!range.empty());
        int key = get_current_group_key();
        int group_begin = range.begin;
        do {
            ++range.begin;
        } while (!range.empty() && get_current_group_key() == key);
        OperatorRange group_range(group_begin, range.begin);
        return make_pair(key, group_range);
    }
};
// The factory only stores the task reference; all work happens in create().
SuccessorGeneratorFactory::SuccessorGeneratorFactory(
    const TaskProxy &task_proxy)
    : task_proxy(task_proxy) {
}
// Defined here so vector<OperatorInfo> is destroyed where the type is complete.
SuccessorGeneratorFactory::~SuccessorGeneratorFactory() = default;
// Combine child generators into a fork node, collapsing the trivial
// one-child case and using the cheaper binary fork for two children.
GeneratorPtr SuccessorGeneratorFactory::construct_fork(
    vector<GeneratorPtr> nodes) const {
    int size = nodes.size();
    if (size == 1) {
        return move(nodes.at(0));
    } else if (size == 2) {
        return utils::make_unique_ptr<GeneratorForkBinary>(
            move(nodes.at(0)), move(nodes.at(1)));
    } else {
        /* This general case includes the case size == 0, which can
           (only) happen for the root for tasks with no operators. */
        return utils::make_unique_ptr<GeneratorForkMulti>(move(nodes));
    }
}
// Build a leaf node for a non-empty range of operators that are all
// applicable once this point in the generator tree has been reached.
GeneratorPtr SuccessorGeneratorFactory::construct_leaf(
    OperatorRange range) const {
    assert(!range.empty());
    vector<OperatorID> op_ids;
    op_ids.reserve(range.span());
    for (int index = range.begin; index != range.end; ++index)
        op_ids.push_back(operator_infos[index].get_op());
    // Use the cheaper single-operator leaf where possible.
    if (op_ids.size() == 1)
        return utils::make_unique_ptr<GeneratorLeafSingle>(op_ids.front());
    return utils::make_unique_ptr<GeneratorLeafVector>(move(op_ids));
}
/*
  Build a switch node over the given variable. Picks the cheapest of the
  three switch representations: a single-value node for one child, and
  otherwise a hash map or a dense vector depending on which is estimated
  to use less memory.
*/
GeneratorPtr SuccessorGeneratorFactory::construct_switch(
    int switch_var_id, ValuesAndGenerators values_and_generators) const {
    VariablesProxy variables = task_proxy.get_variables();
    int var_domain = variables[switch_var_id].get_domain_size();
    int num_children = values_and_generators.size();
    assert(num_children > 0);
    if (num_children == 1) {
        int value = values_and_generators[0].first;
        GeneratorPtr generator = move(values_and_generators[0].second);
        return utils::make_unique_ptr<GeneratorSwitchSingle>(
            switch_var_id, value, move(generator));
    }
    int vector_bytes = utils::estimate_vector_bytes<GeneratorPtr>(var_domain);
    int hash_bytes = utils::estimate_unordered_map_bytes<int, GeneratorPtr>(num_children);
    if (hash_bytes < vector_bytes) {
        unordered_map<int, GeneratorPtr> generator_by_value;
        for (auto &item : values_and_generators)
            generator_by_value[item.first] = move(item.second);
        return utils::make_unique_ptr<GeneratorSwitchHash>(
            switch_var_id, move(generator_by_value));
    } else {
        vector<GeneratorPtr> generator_by_value(var_domain);
        for (auto &item : values_and_generators)
            generator_by_value[item.first] = move(item.second);
        return utils::make_unique_ptr<GeneratorSwitchVector>(
            switch_var_id, move(generator_by_value));
    }
}
/*
  Core of the construction algorithm described at the top of this file:
  group the (sorted) range by the variable of the precondition at 'depth',
  turn each value-subgroup into a recursively built child, and combine the
  per-variable switch nodes (plus a leaf for condition-free operators,
  key -1) into a fork.
*/
GeneratorPtr SuccessorGeneratorFactory::construct_recursive(
    int depth, OperatorRange range) const {
    vector<GeneratorPtr> nodes;
    OperatorGrouper grouper_by_var(
        operator_infos, depth, GroupOperatorsBy::VAR, range);
    while (!grouper_by_var.done()) {
        auto var_group = grouper_by_var.next();
        int var = var_group.first;
        OperatorRange var_range = var_group.second;
        if (var == -1) {
            // Handle a group of immediately applicable operators.
            nodes.push_back(construct_leaf(var_range));
        } else {
            // Handle a group of operators sharing the first precondition variable.
            ValuesAndGenerators values_and_generators;
            OperatorGrouper grouper_by_value(
                operator_infos, depth, GroupOperatorsBy::VALUE, var_range);
            while (!grouper_by_value.done()) {
                auto value_group = grouper_by_value.next();
                int value = value_group.first;
                OperatorRange value_range = value_group.second;
                values_and_generators.emplace_back(
                    value, construct_recursive(depth + 1, value_range));
            }
            nodes.push_back(construct_switch(
                                var, move(values_and_generators)));
        }
    }
    return construct_fork(move(nodes));
}
static vector<FactPair> build_sorted_precondition(const OperatorProxy &op) {
    // Extract the operator's precondition as plain FactPairs, sorted by
    // variable (FactPair ordering compares the variable first).
    const auto preconditions = op.get_preconditions();
    vector<FactPair> precondition;
    precondition.reserve(preconditions.size());
    for (FactProxy fact : preconditions)
        precondition.push_back(fact.get_pair());
    // Preconditions must be sorted by variable.
    sort(precondition.begin(), precondition.end());
    return precondition;
}
GeneratorPtr SuccessorGeneratorFactory::create() {
    // Build the root of the successor generator tree for the factory's task.
    OperatorsProxy operators = task_proxy.get_operators();
    operator_infos.reserve(operators.size());
    for (OperatorProxy op : operators) {
        operator_infos.emplace_back(
            OperatorID(op.get_id()), build_sorted_precondition(op));
    }
    /* Use stable_sort rather than sort for reproducibility.
       This amounts to breaking ties by operator ID. */
    stable_sort(operator_infos.begin(), operator_infos.end());
    OperatorRange full_range(0, operator_infos.size());
    GeneratorPtr root = construct_recursive(0, full_range);
    // The infos are only needed during construction; free them eagerly.
    operator_infos.clear();
    return root;
}
}
| 9,845 |
C++
| 31.82 | 90 | 0.661554 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/causal_graph.h
|
#ifndef TASK_UTILS_CAUSAL_GRAPH_H
#define TASK_UTILS_CAUSAL_GRAPH_H
/*
TODO: Perform some memory profiling on this class.
This implementation precomputes some information (in particular the
"predecessor" and "successor" information) that could also be
computed on the fly. Is this a good time/memory tradeoff? We usually
expect causal graphs to be rather small, but we have some planning
tasks with huge numbers of variables.
In the other direction, it might also be useful to have even more
causal graph variants directly available, e.g. a "get_neighbors"
method that is the union of pre->eff, eff->pre and eff->eff arcs.
Before doing this, we should check that they are useful somewhere
and do the memory profiling.
*/
/*
An IntRelation represents a relation on a set {0, ..., K - 1} as an
adjacency list, encoded as a vector<vector<int> >. For example, the
relation { (0, 1), (0, 3), (1, 3), (3, 0), (3, 1), (3, 2) } over the
set {0, 1, 3, 4} would be represented as
[
[1, 3], # representing (0, 1), (0, 3)
[3], # representing (1, 3)
[], # there are no pairs of the form (2, v)
[0, 1, 2], # representing (3, 0), (3, 1), (3, 2)
[] # there are no pairs of the form (4, v)
]
The number K is called the range of the relation.
The individual lists are guaranteed to be sorted and free of
duplicates.
TODO: IntRelations, along with the efficient way of constructing
them in causal_graph.cc, could be useful for other parts of the
planner, too. If this is the case, they should be moved to a
different source file.
TODO: IntRelations currently only work for relations on {0, ..., K -
1}^2. They could easily be generalized to relations on {0, ..., K -
1 } x S for arbitrary sets S. Our current code only requires that S
is hashable and sortable, and we have one assertion that checks that
S = {0, ..., K - 1}. This could easily be changed if such a
generalization is useful anywhere in the code.
*/
#include <vector>
typedef std::vector<std::vector<int>> IntRelation;
class AbstractTask;
class TaskProxy;
namespace causal_graph {
class CausalGraph {
    // Arcs from precondition variables to effect variables.
    IntRelation pre_to_eff;
    // Arcs from effect variables to precondition variables.
    IntRelation eff_to_pre;
    // Arcs between effect variables of the same operator.
    IntRelation eff_to_eff;
    // Union view: pre->eff plus eff->eff arcs (see method comment below).
    IntRelation successors;
    // Union view: eff->pre plus eff->eff arcs (see method comment below).
    IntRelation predecessors;
    // Debug output of all five relations for the given task.
    void dump(const TaskProxy &task_proxy) const;
public:
    /* Use the factory function get_causal_graph to create causal graphs
       to avoid creating more than one causal graph per AbstractTask. */
    explicit CausalGraph(const TaskProxy &task_proxy);
    ~CausalGraph() = default;
    /*
      All below methods querying neighbors (of some sort or other) of
      var guarantee that:
      - the return vertex list is sorted
      - var itself is not in the returned list
      "Successors" and "predecessors" are w.r.t. the common definition
      of causal graphs, which have pre->eff and eff->eff arcs.
      Note that axioms are treated as operators in the causal graph,
      i.e., their condition variables are treated as precondition
      variables and the derived variable is treated as an effect
      variable.
      For effect conditions, we only add pre->eff arcs for the respective
      conditional effect.
    */
    const std::vector<int> &get_pre_to_eff(int var) const {
        return pre_to_eff[var];
    }
    const std::vector<int> &get_eff_to_pre(int var) const {
        return eff_to_pre[var];
    }
    const std::vector<int> &get_eff_to_eff(int var) const {
        return eff_to_eff[var];
    }
    const std::vector<int> &get_successors(int var) const {
        return successors[var];
    }
    const std::vector<int> &get_predecessors(int var) const {
        return predecessors[var];
    }
};
/* Create or retrieve a causal graph from cache. If causal graphs are created
with this function, we build at most one causal graph per AbstractTask. */
extern const CausalGraph &get_causal_graph(const AbstractTask *task);
}
#endif
| 4,003 |
C
| 32.090909 | 77 | 0.677992 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/successor_generator.cc
|
#include "successor_generator.h"
#include "successor_generator_factory.h"
#include "successor_generator_internals.h"
#include "../abstract_task.h"
using namespace std;
namespace successor_generator {
SuccessorGenerator::SuccessorGenerator(const TaskProxy &task_proxy)
    : root(SuccessorGeneratorFactory(task_proxy).create()) {
    // The decision-tree representation of the task's operators is built
    // once here; queries then walk that tree via the root node.
}
SuccessorGenerator::~SuccessorGenerator() = default;
void SuccessorGenerator::generate_applicable_ops(
    const State &state, vector<OperatorID> &applicable_ops) const {
    // Collect the IDs of all operators applicable in state into
    // applicable_ops (the caller provides/owns the output vector).
    // The tree works on unpacked values, so make sure they are available.
    state.unpack();
    root->generate_applicable_ops(state.get_unpacked_values(), applicable_ops);
}
PerTaskInformation<SuccessorGenerator> g_successor_generators;
}
| 676 |
C++
| 26.079999 | 79 | 0.773669 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/task_properties.h
|
#ifndef TASK_UTILS_TASK_PROPERTIES_H
#define TASK_UTILS_TASK_PROPERTIES_H
#include "../per_task_information.h"
#include "../task_proxy.h"
#include "../algorithms/int_packer.h"
namespace task_properties {
inline bool is_applicable(OperatorProxy op, const State &state) {
    // An operator is applicable iff every one of its precondition facts
    // holds in the given state.
    for (FactProxy fact : op.get_preconditions()) {
        bool holds = (state[fact.get_variable()] == fact);
        if (!holds)
            return false;
    }
    return true;
}
inline bool is_goal_state(TaskProxy task, const State &state) {
    // A state is a goal state iff every goal fact is satisfied in it.
    for (FactProxy goal : task.get_goals()) {
        bool satisfied = (state[goal.get_variable()] == goal);
        if (!satisfied)
            return false;
    }
    return true;
}
/*
Return true iff all operators have cost 1.
Runtime: O(n), where n is the number of operators.
*/
extern bool is_unit_cost(TaskProxy task);
// Runtime: O(1)
extern bool has_axioms(TaskProxy task);
/*
Report an error and exit with ExitCode::UNSUPPORTED if the task has axioms.
Runtime: O(1)
*/
extern void verify_no_axioms(TaskProxy task);
// Runtime: O(n), where n is the number of operators.
extern bool has_conditional_effects(TaskProxy task);
/*
Report an error and exit with ExitCode::UNSUPPORTED if the task has
conditional effects.
Runtime: O(n), where n is the number of operators.
*/
extern void verify_no_conditional_effects(TaskProxy task);
extern std::vector<int> get_operator_costs(const TaskProxy &task_proxy);
extern double get_average_operator_cost(TaskProxy task_proxy);
extern int get_min_operator_cost(TaskProxy task_proxy);
/*
Return the number of facts of the task.
Runtime: O(n), where n is the number of state variables.
*/
extern int get_num_facts(const TaskProxy &task_proxy);
/*
Return the total number of effects of the task, including the
effects of axioms.
Runtime: O(n), where n is the number of operators and axioms.
*/
extern int get_num_total_effects(const TaskProxy &task_proxy);
template<class FactProxyCollection>
std::vector<FactPair> get_fact_pairs(const FactProxyCollection &facts) {
std::vector<FactPair> fact_pairs;
fact_pairs.reserve(facts.size());
for (FactProxy fact : facts) {
fact_pairs.push_back(fact.get_pair());
}
return fact_pairs;
}
extern void print_variable_statistics(const TaskProxy &task_proxy);
extern void dump_pddl(const State &state);
extern void dump_fdr(const State &state);
extern void dump_goals(const GoalsProxy &goals);
extern void dump_task(const TaskProxy &task_proxy);
extern PerTaskInformation<int_packer::IntPacker> g_state_packers;
}
#endif
| 2,537 |
C
| 27.516854 | 77 | 0.718565 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/successor_generator_internals.h
|
#ifndef TASK_UTILS_SUCCESSOR_GENERATOR_INTERNALS_H
#define TASK_UTILS_SUCCESSOR_GENERATOR_INTERNALS_H
#include "../operator_id.h"
#include <memory>
#include <unordered_map>
#include <vector>
class State;
namespace successor_generator {
class GeneratorBase {
public:
virtual ~GeneratorBase() {}
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const = 0;
};
class GeneratorForkBinary : public GeneratorBase {
std::unique_ptr<GeneratorBase> generator1;
std::unique_ptr<GeneratorBase> generator2;
public:
GeneratorForkBinary(
std::unique_ptr<GeneratorBase> generator1,
std::unique_ptr<GeneratorBase> generator2);
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const override;
};
class GeneratorForkMulti : public GeneratorBase {
std::vector<std::unique_ptr<GeneratorBase>> children;
public:
GeneratorForkMulti(std::vector<std::unique_ptr<GeneratorBase>> children);
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const override;
};
class GeneratorSwitchVector : public GeneratorBase {
int switch_var_id;
std::vector<std::unique_ptr<GeneratorBase>> generator_for_value;
public:
GeneratorSwitchVector(
int switch_var_id,
std::vector<std::unique_ptr<GeneratorBase>> &&generator_for_value);
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const override;
};
class GeneratorSwitchHash : public GeneratorBase {
int switch_var_id;
std::unordered_map<int, std::unique_ptr<GeneratorBase>> generator_for_value;
public:
GeneratorSwitchHash(
int switch_var_id,
std::unordered_map<int, std::unique_ptr<GeneratorBase>> &&generator_for_value);
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const override;
};
class GeneratorSwitchSingle : public GeneratorBase {
int switch_var_id;
int value;
std::unique_ptr<GeneratorBase> generator_for_value;
public:
GeneratorSwitchSingle(
int switch_var_id, int value,
std::unique_ptr<GeneratorBase> generator_for_value);
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const override;
};
class GeneratorLeafVector : public GeneratorBase {
std::vector<OperatorID> applicable_operators;
public:
GeneratorLeafVector(std::vector<OperatorID> &&applicable_operators);
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const override;
};
class GeneratorLeafSingle : public GeneratorBase {
OperatorID applicable_operator;
public:
GeneratorLeafSingle(OperatorID applicable_operator);
virtual void generate_applicable_ops(
const std::vector<int> &state, std::vector<OperatorID> &applicable_ops) const override;
};
}
#endif
| 3,119 |
C
| 32.913043 | 95 | 0.725874 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_utils/task_properties.cc
|
#include "task_properties.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include "../utils/system.h"
#include <algorithm>
#include <iostream>
#include <limits>
using namespace std;
using utils::ExitCode;
namespace task_properties {
bool is_unit_cost(TaskProxy task) {
    // The task is unit-cost iff no operator has a cost other than 1.
    for (OperatorProxy op : task.get_operators()) {
        bool is_unit = (op.get_cost() == 1);
        if (!is_unit)
            return false;
    }
    return true;
}
bool has_axioms(TaskProxy task) {
return !task.get_axioms().empty();
}
void verify_no_axioms(TaskProxy task) {
if (has_axioms(task)) {
cerr << "This configuration does not support axioms!"
<< endl << "Terminating." << endl;
utils::exit_with(ExitCode::SEARCH_UNSUPPORTED);
}
}
static int get_first_conditional_effects_op_id(TaskProxy task) {
    // Return the ID of the first operator that has at least one
    // conditional effect, or -1 if all effects are unconditional.
    for (OperatorProxy op : task.get_operators()) {
        for (EffectProxy effect : op.get_effects()) {
            bool is_conditional = !effect.get_conditions().empty();
            if (is_conditional)
                return op.get_id();
        }
    }
    return -1;
}
bool has_conditional_effects(TaskProxy task) {
return get_first_conditional_effects_op_id(task) != -1;
}
void verify_no_conditional_effects(TaskProxy task) {
int op_id = get_first_conditional_effects_op_id(task);
if (op_id != -1) {
OperatorProxy op = task.get_operators()[op_id];
cerr << "This configuration does not support conditional effects "
<< "(operator " << op.get_name() << ")!" << endl
<< "Terminating." << endl;
utils::exit_with(ExitCode::SEARCH_UNSUPPORTED);
}
}
vector<int> get_operator_costs(const TaskProxy &task_proxy) {
vector<int> costs;
OperatorsProxy operators = task_proxy.get_operators();
costs.reserve(operators.size());
for (OperatorProxy op : operators)
costs.push_back(op.get_cost());
return costs;
}
double get_average_operator_cost(TaskProxy task_proxy) {
    /*
      Return the arithmetic mean of all operator costs.

      Returns 0 for tasks without operators. The previous implementation
      divided by zero in that case and returned NaN, which silently
      poisons any arithmetic done with the result.
    */
    OperatorsProxy operators = task_proxy.get_operators();
    if (operators.empty())
        return 0;
    double total_cost = 0;
    for (OperatorProxy op : operators) {
        total_cost += op.get_cost();
    }
    return total_cost / operators.size();
}
int get_min_operator_cost(TaskProxy task_proxy) {
    // Minimum over all operator costs; INT_MAX for tasks without operators.
    int cheapest = numeric_limits<int>::max();
    for (OperatorProxy op : task_proxy.get_operators()) {
        int cost = op.get_cost();
        if (cost < cheapest)
            cheapest = cost;
    }
    return cheapest;
}
int get_num_facts(const TaskProxy &task_proxy) {
int num_facts = 0;
for (VariableProxy var : task_proxy.get_variables())
num_facts += var.get_domain_size();
return num_facts;
}
int get_num_total_effects(const TaskProxy &task_proxy) {
int num_effects = 0;
for (OperatorProxy op : task_proxy.get_operators())
num_effects += op.get_effects().size();
num_effects += task_proxy.get_axioms().size();
return num_effects;
}
void print_variable_statistics(const TaskProxy &task_proxy) {
const int_packer::IntPacker &state_packer = g_state_packers[task_proxy];
int num_facts = 0;
VariablesProxy variables = task_proxy.get_variables();
for (VariableProxy var : variables)
num_facts += var.get_domain_size();
utils::g_log << "Variables: " << variables.size() << endl;
utils::g_log << "FactPairs: " << num_facts << endl;
utils::g_log << "Bytes per state: "
<< state_packer.get_num_bins() * sizeof(int_packer::IntPacker::Bin)
<< endl;
}
void dump_pddl(const State &state) {
for (FactProxy fact : state) {
string fact_name = fact.get_name();
if (fact_name != "<none of those>")
utils::g_log << fact_name << endl;
}
}
void dump_fdr(const State &state) {
for (FactProxy fact : state) {
VariableProxy var = fact.get_variable();
utils::g_log << " #" << var.get_id() << " [" << var.get_name() << "] -> "
<< fact.get_value() << endl;
}
}
void dump_goals(const GoalsProxy &goals) {
utils::g_log << "Goal conditions:" << endl;
for (FactProxy goal : goals) {
utils::g_log << " " << goal.get_variable().get_name() << ": "
<< goal.get_value() << endl;
}
}
void dump_task(const TaskProxy &task_proxy) {
OperatorsProxy operators = task_proxy.get_operators();
int min_action_cost = numeric_limits<int>::max();
int max_action_cost = 0;
for (OperatorProxy op : operators) {
min_action_cost = min(min_action_cost, op.get_cost());
max_action_cost = max(max_action_cost, op.get_cost());
}
utils::g_log << "Min action cost: " << min_action_cost << endl;
utils::g_log << "Max action cost: " << max_action_cost << endl;
VariablesProxy variables = task_proxy.get_variables();
utils::g_log << "Variables (" << variables.size() << "):" << endl;
for (VariableProxy var : variables) {
utils::g_log << " " << var.get_name()
<< " (range " << var.get_domain_size() << ")" << endl;
for (int val = 0; val < var.get_domain_size(); ++val) {
utils::g_log << " " << val << ": " << var.get_fact(val).get_name() << endl;
}
}
State initial_state = task_proxy.get_initial_state();
utils::g_log << "Initial state (PDDL):" << endl;
dump_pddl(initial_state);
utils::g_log << "Initial state (FDR):" << endl;
dump_fdr(initial_state);
dump_goals(task_proxy.get_goals());
}
PerTaskInformation<int_packer::IntPacker> g_state_packers(
[](const TaskProxy &task_proxy) {
VariablesProxy variables = task_proxy.get_variables();
vector<int> variable_ranges;
variable_ranges.reserve(variables.size());
for (VariableProxy var : variables) {
variable_ranges.push_back(var.get_domain_size());
}
return utils::make_unique_ptr<int_packer::IntPacker>(variable_ranges);
}
);
}
| 5,867 |
C++
| 31.241758 | 90 | 0.599114 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/exceptions.h
|
#ifndef UTILS_EXCEPTIONS_H
#define UTILS_EXCEPTIONS_H
namespace utils {
// Base class for custom exception types.
class Exception {
public:
virtual ~Exception() = default;
virtual void print() const = 0;
};
}
#endif
| 226 |
C
| 15.214285 | 41 | 0.707965 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/language.h
|
#ifndef UTILS_LANGUAGE_H
#define UTILS_LANGUAGE_H
#if defined(_MSC_VER)
#define NO_RETURN __declspec(noreturn)
#else
#define NO_RETURN __attribute__((noreturn))
#endif
namespace utils {
template<typename T>
void unused_variable(const T &) {
}
}
#endif
| 255 |
C
| 14.058823 | 43 | 0.72549 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/markup.h
|
#ifndef UTILS_MARKUP_H
#define UTILS_MARKUP_H
#include <string>
#include <vector>
namespace utils {
extern std::string format_conference_reference(
const std::vector<std::string> &authors, const std::string &title,
const std::string &url, const std::string &conference,
const std::string &pages, const std::string &publisher,
const std::string &year);
extern std::string format_journal_reference(
const std::vector<std::string> &authors, const std::string &title,
const std::string &url, const std::string &journal,
const std::string &volume, const std::string &pages,
const std::string &year);
}
#endif
| 640 |
C
| 28.136362 | 70 | 0.70625 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/system_unix.h
|
#ifndef UTILS_SYSTEM_UNIX_H
#define UTILS_SYSTEM_UNIX_H
#endif
| 64 |
C
| 11.999998 | 27 | 0.765625 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/timer.cc
|
#include "timer.h"
#include <ctime>
#include <ostream>
#if OPERATING_SYSTEM == LINUX || OPERATING_SYSTEM == OSX
#include <sys/time.h>
#endif
#if OPERATING_SYSTEM == OSX
#include <mach/mach_time.h>
#endif
using namespace std;
namespace utils {
ostream &operator<<(ostream &os, const Duration &time) {
os << static_cast<double>(time) << "s";
return os;
}
static double compute_sanitized_duration(double start_clock, double end_clock) {
    /*
      Clock measurements sometimes yield durations implausibly close to
      zero (below one cycle of a < 10 GHz CPU, i.e. under 1e-10 seconds)
      and occasionally even slightly negative. Treat all such values as
      exactly 0.
    */
    const double elapsed = end_clock - start_clock;
    const bool is_measurement_noise = elapsed > -1e-10 && elapsed < 1e-10;
    return is_measurement_noise ? 0.0 : elapsed;
}
#if OPERATING_SYSTEM == OSX
void mach_absolute_difference(uint64_t end, uint64_t start, struct timespec *tp) {
uint64_t difference = end - start;
static mach_timebase_info_data_t info = {
0, 0
};
if (info.denom == 0)
mach_timebase_info(&info);
uint64_t elapsednano = difference * (info.numer / info.denom);
tp->tv_sec = elapsednano * 1e-9;
tp->tv_nsec = elapsednano - (tp->tv_sec * 1e9);
}
#endif
Timer::Timer(bool start) {
#if OPERATING_SYSTEM == WINDOWS
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&start_ticks);
#endif
collected_time = 0;
stopped = !start;
last_start_clock = start ? current_clock() : 0.;
}
double Timer::current_clock() const {
#if OPERATING_SYSTEM == WINDOWS
LARGE_INTEGER now_ticks;
QueryPerformanceCounter(&now_ticks);
double ticks = static_cast<double>(now_ticks.QuadPart - start_ticks.QuadPart);
return ticks / frequency.QuadPart;
#else
timespec tp;
#if OPERATING_SYSTEM == OSX
static uint64_t start = mach_absolute_time();
uint64_t end = mach_absolute_time();
mach_absolute_difference(end, start, &tp);
#else
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);
#endif
return tp.tv_sec + tp.tv_nsec / 1e9;
#endif
}
Duration Timer::stop() {
collected_time = (*this)();
stopped = true;
return Duration(collected_time);
}
Duration Timer::operator()() const {
if (stopped)
return Duration(collected_time);
else
return Duration(collected_time
+ compute_sanitized_duration(last_start_clock, current_clock()));
}
void Timer::resume() {
if (stopped) {
stopped = false;
last_start_clock = current_clock();
}
}
Duration Timer::reset() {
double result = (*this)();
collected_time = 0;
last_start_clock = current_clock();
return Duration(result);
}
ostream &operator<<(ostream &os, const Timer &timer) {
os << timer();
return os;
}
Timer g_timer;
}
| 2,948 |
C++
| 24.205128 | 101 | 0.651628 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/memory.cc
|
#include "memory.h"
#include "../utils/logging.h"
#include <cassert>
#include <iostream>
using namespace std;
namespace utils {
static char *extra_memory_padding = nullptr;
// Save standard out-of-memory handler.
static void (*standard_out_of_memory_handler)() = nullptr;
void continuing_out_of_memory_handler() {
    // new-handler installed by reserve_extra_memory_padding: on the first
    // failed allocation, free the padding so the allocation can be retried
    // with the reclaimed memory, and log that this happened.
    release_extra_memory_padding();
    utils::g_log << "Failed to allocate memory. Released extra memory padding." << endl;
}
void reserve_extra_memory_padding(int memory_in_mb) {
    /*
      Allocate memory_in_mb MB of padding and install a new-handler that
      releases it on allocation failure (see
      continuing_out_of_memory_handler). Must not be called while padding
      is already reserved.
    */
    assert(!extra_memory_padding);
    /* Do the size computation in size_t: "memory_in_mb * 1024 * 1024" is
       evaluated in int and overflows for paddings of 2 GiB or more. */
    extra_memory_padding =
        new char[static_cast<size_t>(memory_in_mb) * 1024 * 1024];
    standard_out_of_memory_handler = set_new_handler(continuing_out_of_memory_handler);
}
void release_extra_memory_padding() {
assert(extra_memory_padding);
delete[] extra_memory_padding;
extra_memory_padding = nullptr;
assert(standard_out_of_memory_handler);
set_new_handler(standard_out_of_memory_handler);
}
bool extra_memory_padding_is_reserved() {
    // True iff a reserve_extra_memory_padding() call is currently in effect.
    return extra_memory_padding != nullptr;
}
}
| 1,015 |
C++
| 25.051281 | 88 | 0.714286 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/rng_options.cc
|
#include "rng_options.h"
#include "rng.h"
#include "../options/option_parser.h"
using namespace std;
namespace utils {
void add_rng_options(options::OptionParser &parser) {
parser.add_option<int>(
"random_seed",
"Set to -1 (default) to use the global random number generator. "
"Set to any other value to use a local random number generator with "
"the given seed.",
"-1",
options::Bounds("-1", "infinity"));
}
shared_ptr<RandomNumberGenerator> parse_rng_from_options(
    const options::Options &options) {
    // Create the RNG requested via the "random_seed" option (see
    // add_rng_options): -1 selects the shared default generator, any
    // other value creates a fresh generator seeded with that value.
    int seed = options.get<int>("random_seed");
    if (seed == -1) {
        // Use an arbitrary default seed.
        /* All callers passing seed == -1 share this single static
           generator, so their random streams are interleaved. */
        static shared_ptr<utils::RandomNumberGenerator> rng =
            make_shared<utils::RandomNumberGenerator>(2011);
        return rng;
    } else {
        return make_shared<RandomNumberGenerator>(seed);
    }
}
}
| 900 |
C++
| 26.30303 | 77 | 0.632222 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/system.h
|
#ifndef UTILS_SYSTEM_H
#define UTILS_SYSTEM_H
#define LINUX 0
#define OSX 1
#define WINDOWS 2
#if defined(_WIN32)
#define OPERATING_SYSTEM WINDOWS
#include "system_windows.h"
#elif defined(__APPLE__)
#define OPERATING_SYSTEM OSX
#include "system_unix.h"
#else
#define OPERATING_SYSTEM LINUX
#include "system_unix.h"
#endif
#include "language.h"
#include <iostream>
#include <stdlib.h>
#define ABORT(msg) \
( \
(std::cerr << "Critical error in file " << __FILE__ \
<< ", line " << __LINE__ << ": " << std::endl \
<< (msg) << std::endl), \
(abort()), \
(void)0 \
)
namespace utils {
enum class ExitCode {
/*
For a full list of exit codes, please see driver/returncodes.py. Here,
we only list codes that are used by the search component of the planner.
*/
// 0-9: exit codes denoting a plan was found
SUCCESS = 0,
// 10-19: exit codes denoting no plan was found (without any error)
SEARCH_UNSOLVABLE = 11, // Task is provably unsolvable with given bound.
SEARCH_UNSOLVED_INCOMPLETE = 12, // Search ended without finding a solution.
// 20-29: "expected" failures
SEARCH_OUT_OF_MEMORY = 22,
SEARCH_OUT_OF_TIME = 23,
// 30-39: unrecoverable errors
SEARCH_CRITICAL_ERROR = 32,
SEARCH_INPUT_ERROR = 33,
SEARCH_UNSUPPORTED = 34
};
NO_RETURN extern void exit_with(ExitCode returncode);
NO_RETURN extern void exit_after_receiving_signal(ExitCode returncode);
int get_peak_memory_in_kb();
const char *get_exit_code_message_reentrant(ExitCode exitcode);
bool is_exit_code_error_reentrant(ExitCode exitcode);
void register_event_handlers();
void report_exit_code_reentrant(ExitCode exitcode);
int get_process_id();
}
#endif
| 1,762 |
C
| 24.92647 | 81 | 0.66118 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/logging.h
|
#ifndef UTILS_LOGGING_H
#define UTILS_LOGGING_H
#include "system.h"
#include "timer.h"
#include <ostream>
#include <string>
#include <vector>
namespace options {
class OptionParser;
}
namespace utils {
/*
Simple logger that prepends time and peak memory info to messages.
Logs are written to stdout.
Usage:
utils::g_log << "States: " << num_states << endl;
*/
class Log {
private:
bool line_has_started = false;
public:
template<typename T>
Log &operator<<(const T &elem) {
if (!line_has_started) {
line_has_started = true;
std::cout << "[t=" << g_timer << ", "
<< get_peak_memory_in_kb() << " KB] ";
}
std::cout << elem;
return *this;
}
using manip_function = std::ostream &(*)(std::ostream &);
Log &operator<<(manip_function f) {
if (f == static_cast<manip_function>(&std::endl)) {
line_has_started = false;
}
std::cout << f;
return *this;
}
};
extern Log g_log;
// See add_verbosity_option_to_parser for documentation.
enum class Verbosity {
SILENT,
NORMAL,
VERBOSE,
DEBUG
};
extern void add_verbosity_option_to_parser(options::OptionParser &parser);
class TraceBlock {
std::string block_name;
public:
explicit TraceBlock(const std::string &block_name);
~TraceBlock();
};
extern void trace(const std::string &msg = "");
}
namespace std {
/*
  Print a vector as "[a, b, c]" (empty vectors print as "[]").

  NOTE(review): adding new overloads to namespace std is not sanctioned
  by the standard; kept as-is because call sites depend on this lookup.
*/
template<class T>
ostream &operator<<(ostream &stream, const vector<T> &vec) {
    stream << "[";
    const char *separator = "";
    for (const T &element : vec) {
        stream << separator << element;
        separator = ", ";
    }
    stream << "]";
    return stream;
}
}
#endif
| 1,722 |
C
| 18.579545 | 74 | 0.572009 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/logging.cc
|
#include "logging.h"
#include "system.h"
#include "timer.h"
#include "../option_parser.h"
#include <iomanip>
#include <iostream>
#include <vector>
using namespace std;
namespace utils {
void add_verbosity_option_to_parser(options::OptionParser &parser) {
vector<string> verbosity_levels;
vector<string> verbosity_level_docs;
verbosity_levels.push_back("silent");
verbosity_level_docs.push_back(
"only the most basic output");
verbosity_levels.push_back("normal");
verbosity_level_docs.push_back(
"relevant information to monitor progress");
verbosity_levels.push_back("verbose");
verbosity_level_docs.push_back(
"full output");
verbosity_levels.push_back("debug");
verbosity_level_docs.push_back(
"like full with additional debug output");
parser.add_enum_option<Verbosity>(
"verbosity",
verbosity_levels,
"Option to specify the verbosity level.",
"normal",
verbosity_level_docs);
}
class MemoryTracer {
// The following constants affect the formatting of output.
static const int INDENT_AMOUNT = 2;
static const int MEM_FIELD_WIDTH = 7;
static const int TIME_FIELD_WIDTH = 7;
vector<string> block_stack;
public:
MemoryTracer();
~MemoryTracer();
void enter_block(const string &block_name);
void leave_block(const string &block_name);
void print_trace_message(const string &msg);
};
static MemoryTracer _tracer;
MemoryTracer::MemoryTracer() {
}
MemoryTracer::~MemoryTracer() {
if (!block_stack.empty())
ABORT("oops!");
}
void MemoryTracer::enter_block(const string &block_name) {
_tracer.print_trace_message("enter " + block_name);
block_stack.push_back(block_name);
}
void MemoryTracer::leave_block(const string &block_name) {
if (block_stack.empty() || block_stack.back() != block_name)
ABORT("oops!");
block_stack.pop_back();
_tracer.print_trace_message("leave " + block_name);
}
void MemoryTracer::print_trace_message(const string &msg) {
g_log << "[TRACE] "
<< setw(TIME_FIELD_WIDTH) << g_timer << " "
<< setw(MEM_FIELD_WIDTH) << get_peak_memory_in_kb() << " KB";
for (size_t i = 0; i < block_stack.size() * INDENT_AMOUNT; ++i)
g_log << ' ';
g_log << ' ' << msg << endl;
}
TraceBlock::TraceBlock(const string &block_name)
: block_name(block_name) {
_tracer.enter_block(block_name);
}
TraceBlock::~TraceBlock() {
_tracer.leave_block(block_name);
}
void trace(const string &msg) {
_tracer.print_trace_message(msg);
}
Log g_log;
}
| 2,611 |
C++
| 22.963303 | 71 | 0.647262 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/hash.h
|
#ifndef UTILS_HASH_H
#define UTILS_HASH_H
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
namespace utils {
/*
We provide a family of hash functions that are supposedly higher
quality than what is guaranteed by the standard library. Changing a
single bit in the input should typically change around half of the
bits in the final hash value. The hash functions we previously used
turned out to cluster when we tried hash tables with open addressing
for state registries.
The low-level hash functions are based on lookup3.c by Bob Jenkins,
May 2006, public domain. See http://www.burtleburtle.net/bob/c/lookup3.c.
To hash an object x, it is represented as a sequence of 32-bit
pieces (called the "code" for x, written code(x) in the following)
that are "fed" to the main hashing function (implemented in class
HashState) one by one. This allows a compositional approach to
hashing. For example, the code for a pair p is the concatenation of
code(x.first) and code(x.second).
A simpler compositional approach to hashing would first hash the
components of an object and then combine the hash values, and this
is what a previous version of our code did. The approach with an
explicit HashState object is stronger because the internal hash
state is larger (96 bits) than the final hash value and hence pairs
<x, y> and <x', y> where x and x' have the same hash value don't
necessarily collide. Another advantage of our approach is that we
can use the same overall hashing approach to generate hash values of
different types (e.g. 32-bit vs. 64-bit unsigned integers).
To extend the hashing mechanism to further classes, provide a
template specialization for the "feed" function. This must satisfy
the following requirements:
A) If x and y are objects of the same type, they should have code(x)
= code(y) iff x = y. That is, the code sequence should uniquely
describe each logically distinct object.
This requirement avoids unnecessary hash collisions. Of course,
there will still be "necessary" hash collisions because different
code sequences can collide in the low-level hash function.
B) To play nicely with composition, we additionally require that feed
implements a prefix code, i.e., for objects x != y of the same
type, code(x) must not be a prefix of code(y).
This requirement makes it much easier to define non-colliding
code sequences for composite objects such as pairs via
concatenation: if <a, b> != <a', b'>, then code(a) != code(a')
and code(b) != code(b') is *not* sufficient for concat(code(a),
code(b)) != concat(code(a'), code(b')). However, if we require a
prefix code, it *is* sufficient and the resulting code will again
be a prefix code.
Note that objects "of the same type" is meant as "logical type"
rather than C++ type.
For example, for objects such as vectors where we expect
different-length vectors to be combined in the same containers (=
have the same logical type), we include the length of the vector as
the first element in the code to ensure the prefix code property.
In contrast, for integer arrays encoding states, we *do not* include
the length as a prefix because states of different sizes are
considered to be different logical types and should not be mixed in
the same container, even though they are represented by the same C++
type.
*/
/*
Circular rotation (http://stackoverflow.com/a/31488147/224132).
*/
/*
  Circular left rotation (http://stackoverflow.com/a/31488147/224132).

  Generalized to handle offset == 0 (and offsets >= 32, reduced modulo 32)
  without undefined behavior: the old "value >> (32 - offset)" shifts by
  32 bits when offset is 0, which is UB for 32-bit operands.
*/
inline uint32_t rotate(uint32_t value, uint32_t offset) {
    offset &= 31;
    if (offset == 0)
        return value;
    return (value << offset) | (value >> (32 - offset));
}
/*
Store the state of the hashing process.
This class can either be used by specializing the template function
utils::feed() (recommended, see below), or by working with it directly.
*/
class HashState {
    std::uint32_t a, b, c;
    // Number of 32-bit values added since the last mix (0..3);
    // -1 marks a finalized state that must not be used any more.
    int pending_values;

    /*
      Bijectively mix the three 32-bit values. No information
      contained in (a, b, c) before mix() is lost by this operation.
    */
    void mix() {
        a -= c; a ^= rotate(c, 4); c += b;
        b -= a; b ^= rotate(a, 6); a += c;
        c -= b; c ^= rotate(b, 8); b += a;
        a -= c; a ^= rotate(c, 16); c += b;
        b -= a; b ^= rotate(a, 19); a += c;
        c -= b; c ^= rotate(b, 4); b += a;
    }

    /*
      Condense (a, b, c) into c as the final mixing step. Triples
      (a, b, c) that differ in only a few bits usually produce values
      of c that look totally different.
    */
    void final_mix() {
        c ^= b; c -= rotate(b, 14);
        a ^= c; a -= rotate(c, 11);
        b ^= a; b -= rotate(a, 25);
        c ^= b; c -= rotate(b, 16);
        a ^= c; a -= rotate(c, 4);
        b ^= a; b -= rotate(a, 14);
        c ^= b; c -= rotate(b, 24);
    }
public:
    HashState()
        : a(0xdeadbeef),
          b(a),
          c(a),
          pending_values(0) {
    }

    // Add one 32-bit value to the hash state.
    void feed(std::uint32_t value) {
        assert(pending_values != -1);
        if (pending_values == 3) {
            mix();
            pending_values = 0;
        }
        switch (pending_values) {
        case 0:
            a += value;
            ++pending_values;
            break;
        case 1:
            b += value;
            ++pending_values;
            break;
        case 2:
            c += value;
            ++pending_values;
            break;
        }
    }

    /*
      After calling this method, the HashState object must not be used
      any more, i.e., no further calls to feed, get_hash32 or
      get_hash64. We set pending_values = -1 to catch such illegal
      usage in debug mode.
    */
    std::uint32_t get_hash32() {
        assert(pending_values != -1);
        /*
          pending_values == 0 can only hold if feed() was never
          called, i.e., if we are hashing an empty sequence. In that
          case we skip final_mix() for compatibility with the original
          hash function by Jenkins.
        */
        if (pending_values)
            final_mix();
        pending_values = -1;
        return c;
    }

    // See comment for get_hash32.
    std::uint64_t get_hash64() {
        assert(pending_values != -1);
        if (pending_values)
            final_mix();
        pending_values = -1;
        return (static_cast<std::uint64_t>(b) << 32) | c;
    }
};
/*
These functions add a new object to an existing HashState object.
To add hashing support for a user type X, provide an override
for utils::feed(HashState &hash_state, const X &value).
*/
static_assert(
sizeof(int) == sizeof(std::uint32_t),
"int and uint32_t have different sizes");
inline void feed(HashState &hash_state, int value) {
hash_state.feed(static_cast<std::uint32_t>(value));
}
static_assert(
sizeof(unsigned int) == sizeof(std::uint32_t),
"unsigned int and uint32_t have different sizes");
inline void feed(HashState &hash_state, unsigned int value) {
hash_state.feed(static_cast<std::uint32_t>(value));
}
inline void feed(HashState &hash_state, std::uint64_t value) {
hash_state.feed(static_cast<std::uint32_t>(value));
value >>= 32;
hash_state.feed(static_cast<std::uint32_t>(value));
}
template<typename T>
void feed(HashState &hash_state, const T *p) {
// This is wasteful in 32-bit mode, but we plan to discontinue 32-bit compiles anyway.
feed(hash_state, reinterpret_cast<std::uint64_t>(p));
}
template<typename T1, typename T2>
void feed(HashState &hash_state, const std::pair<T1, T2> &p) {
feed(hash_state, p.first);
feed(hash_state, p.second);
}
// Feed a vector by feeding its length followed by every element in order.
template<typename T>
void feed(HashState &hash_state, const std::vector<T> &vec) {
    /*
      Feed vector size to ensure that no two different vectors of the same type
      have the same code prefix.
      Using uint64_t is wasteful on 32-bit platforms but feeding a size_t breaks
      the build on MacOS (see msg7812).
    */
    feed(hash_state, static_cast<uint64_t>(vec.size()));
    // Recurse into the feed() overload for the element type.
    for (const T &item : vec) {
        feed(hash_state, item);
    }
}
/*
Public hash functions.
get_hash() is used internally by the HashMap and HashSet classes below. In
more exotic use cases, such as implementing a custom hash table, you can also
use `get_hash32()`, `get_hash64()` and `get_hash()` directly.
*/
template<typename T>
std::uint32_t get_hash32(const T &value) {
HashState hash_state;
feed(hash_state, value);
return hash_state.get_hash32();
}
template<typename T>
std::uint64_t get_hash64(const T &value) {
HashState hash_state;
feed(hash_state, value);
return hash_state.get_hash64();
}
template<typename T>
std::size_t get_hash(const T &value) {
return static_cast<std::size_t>(get_hash64(value));
}
// This struct should only be used by HashMap and HashSet below.
template<typename T>
struct Hash {
std::size_t operator()(const T &val) const {
return get_hash(val);
}
};
/*
Aliases for hash sets and hash maps in user code.
Use these aliases for hashing types T that don't have a standard std::hash<T>
specialization.
To hash types that are not supported out of the box, implement utils::feed.
*/
template<typename T1, typename T2>
using HashMap = std::unordered_map<T1, T2, Hash<T1>>;
template<typename T>
using HashSet = std::unordered_set<T, Hash<T>>;
}
#endif
| 9,671 |
C
| 29.900958 | 90 | 0.634371 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/rng_options.h
|
#ifndef UTILS_RNG_OPTIONS_H
#define UTILS_RNG_OPTIONS_H
#include <memory>
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class RandomNumberGenerator;
// Add random_seed option to parser.
extern void add_rng_options(options::OptionParser &parser);
/*
Return an RNG based on the given options, which can either be the global
RNG or a local one with a user-specified seed. Only use this together with
"add_rng_options()".
*/
extern std::shared_ptr<RandomNumberGenerator> parse_rng_from_options(
const options::Options &options);
}
#endif
| 579 |
C
| 20.481481 | 76 | 0.753022 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/collections.h
|
#ifndef UTILS_COLLECTIONS_H
#define UTILS_COLLECTIONS_H
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <functional>
#include <iterator>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace utils {
// Sort the vector and then remove all duplicate entries in place.
template<class T>
extern void sort_unique(std::vector<T> &vec) {
    std::sort(vec.begin(), vec.end());
    auto first_duplicate = std::unique(vec.begin(), vec.end());
    vec.erase(first_duplicate, vec.end());
}
// Return true iff the values are in strictly increasing order
// (i.e., sorted and free of duplicates).
template<class T>
extern bool is_sorted_unique(const std::vector<T> &values) {
    // A pair of neighbors with values[i - 1] >= values[i] is a violation.
    return std::adjacent_find(
               values.begin(), values.end(), std::greater_equal<T>()) ==
           values.end();
}
template<class T>
bool in_bounds(int index, const T &container) {
return index >= 0 && static_cast<size_t>(index) < container.size();
}
template<class T>
bool in_bounds(long index, const T &container) {
return index >= 0 && static_cast<size_t>(index) < container.size();
}
template<class T>
bool in_bounds(size_t index, const T &container) {
return index < container.size();
}
/*
  Remove and return the element at index pos in constant time by
  overwriting it with the last element and shrinking the vector by
  one. Does not preserve the order of the remaining elements.
*/
template<typename T>
T swap_and_pop_from_vector(std::vector<T> &vec, size_t pos) {
    assert(in_bounds(pos, vec));
    std::swap(vec[pos], vec.back());
    T removed = std::move(vec.back());
    vec.pop_back();
    return removed;
}
template<class T>
void release_vector_memory(std::vector<T> &vec) {
std::vector<T>().swap(vec);
}
/*
  Look up key in dict and return the mapped value if it is present;
  otherwise return default_value.
*/
template<class KeyType, class ValueType>
ValueType get_value_or_default(
    const std::unordered_map<KeyType, ValueType> &dict,
    const KeyType &key,
    const ValueType &default_value) {
    auto it = dict.find(key);
    return (it == dict.end()) ? default_value : it->second;
}
template<typename ElemTo, typename Collection, typename MapFunc>
std::vector<ElemTo> map_vector(const Collection &collection, MapFunc map_func) {
std::vector<ElemTo> transformed;
transformed.reserve(collection.size());
std::transform(begin(collection), end(collection),
std::back_inserter(transformed), map_func);
return transformed;
}
template<typename T, typename Collection>
std::vector<T> sorted(Collection &&collection) {
std::vector<T> vec(std::forward<Collection>(collection));
std::sort(vec.begin(), vec.end());
return vec;
}
// Rough estimate of the heap + object footprint in bytes of a
// std::vector<T> holding num_elements entries.
template<typename T>
int estimate_vector_bytes(int num_elements) {
    /*
      This estimate is based on a study of the C++ standard library
      that shipped with gcc around the year 2017. It does not claim to
      be accurate and may certainly be inaccurate for other compilers
      or compiler versions.
    */
    int bytes = 2 * sizeof(void *);     // overhead for dynamic memory management
    bytes += sizeof(std::vector<T>);    // size of empty vector
    bytes += num_elements * sizeof(T);  // size of actual entries
    return bytes;
}
template<typename T>
int _estimate_hash_table_bytes(int num_entries) {
/*
The same comments as for estimate_vector_bytes apply.
Additionally, there may be alignment issues, especially on
64-bit systems, that make this estimate too optimistic for
certain cases.
*/
assert(num_entries < (1 << 28));
/*
Having num_entries < 2^28 is necessary but not sufficient for
the result value to not overflow. If we ever change this
function to support larger data structures (using a size_t
return value), we must update the list of bounds below (taken
from the gcc library source).
*/
int num_buckets = 0;
const auto bounds = {
2, 5, 11, 23, 47, 97, 199, 409, 823, 1741, 3469, 6949, 14033,
28411, 57557, 116731, 236897, 480881, 976369, 1982627, 4026031,
8175383, 16601593, 33712729, 68460391, 139022417, 282312799
};
for (int bound : bounds) {
if (num_entries < bound) {
num_buckets = bound;
break;
}
}
int size = 0;
size += 2 * sizeof(void *); // overhead for dynamic memory management
size += sizeof(T); // empty container
using Entry = typename T::value_type;
size += num_entries * sizeof(Entry); // actual entries
size += num_entries * sizeof(Entry *); // pointer to values
size += num_entries * sizeof(void *); // pointer to next node
size += num_buckets * sizeof(void *); // pointer to next bucket
return size;
}
template<typename T>
int estimate_unordered_set_bytes(int num_entries) {
// See comments for _estimate_hash_table_bytes.
return _estimate_hash_table_bytes<std::unordered_set<T>>(num_entries);
}
template<typename Key, typename Value>
int estimate_unordered_map_bytes(int num_entries) {
// See comments for _estimate_hash_table_bytes.
return _estimate_hash_table_bytes<std::unordered_map<Key, Value>>(num_entries);
}
}
#endif
| 4,886 |
C
| 30.127388 | 100 | 0.638354 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/countdown_timer.cc
|
#include "countdown_timer.h"
#include <limits>
using namespace std;
namespace utils {
CountdownTimer::CountdownTimer(double max_time)
: max_time(max_time) {
}
CountdownTimer::~CountdownTimer() {
}
// Return true iff the elapsed time has reached the time limit.
bool CountdownTimer::is_expired() const {
    /*
      We avoid querying the timer when it cannot expire so that we get cleaner
      output from "strace" (which otherwise reports the "times" system call
      millions of times).
    */
    return max_time != numeric_limits<double>::infinity() && timer() >= max_time;
}
Duration CountdownTimer::get_elapsed_time() const {
return timer();
}
Duration CountdownTimer::get_remaining_time() const {
return Duration(max_time - get_elapsed_time());
}
}
| 712 |
C++
| 21.281249 | 81 | 0.691011 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/math.h
|
#ifndef UTILS_MATH_H
#define UTILS_MATH_H
namespace utils {
/* Test if the product of two numbers is bounded by a third number.
Safe against overflow. The caller must guarantee
0 <= factor1, factor2 <= limit; failing this is an error. */
extern bool is_product_within_limit(int factor1, int factor2, int limit);
/* Test if the product of two numbers falls between the given inclusive lower
and upper bounds. Safe against overflow. The caller must guarantee
lower_limit < 0 and upper_limit >= 0; failing this is an error. */
extern bool is_product_within_limits(
int factor1, int factor2, int lower_limit, int upper_limit);
}
#endif
| 651 |
C
| 35.22222 | 77 | 0.737327 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/strings.cc
|
#include "strings.h"
#include <algorithm>
#include <iostream>
using namespace std;
namespace utils {
StringOperationError::StringOperationError(const string &msg)
: msg(msg) {
}
void StringOperationError::print() const {
cerr << msg << endl;
}
// Remove leading whitespace from s in place.
void lstrip(std::string &s) {
    /*
      isspace must be called with a value representable as unsigned
      char: passing a negative char (possible for non-ASCII input on
      platforms where char is signed) is undefined behavior, so the
      lambda takes the character as unsigned char.
    */
    s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](unsigned char ch) {
                           return !std::isspace(ch);
                       }));
}
// Remove trailing whitespace from s in place.
void rstrip(std::string &s) {
    /*
      Take the character as unsigned char: calling isspace with a
      negative char value (possible for non-ASCII input on platforms
      where char is signed) is undefined behavior.
    */
    s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) {
                return !std::isspace(ch);
            }).base(), s.end());
}
void strip(string &s) {
lstrip(s);
rstrip(s);
}
bool startswith(const string &s, const string &prefix) {
return s.compare(0, prefix.size(), prefix) == 0;
}
/*
  Split s at the first occurrence of separator and return the parts
  before and after it. Throws StringOperationError if separator does
  not occur in s.
*/
pair<string, string> split(const string &s, const string &separator) {
    /*
      Use string::size_type and compare against npos instead of
      narrowing the result of find() to int, which relied on
      implementation-defined conversion of npos to -1.
    */
    string::size_type split_pos = s.find(separator);
    if (split_pos == string::npos) {
        throw StringOperationError("separator not found");
    }
    string lhs = s.substr(0, split_pos);
    /*
      Skip the whole separator. The previous code advanced by a single
      character, which left the tail of a multi-character separator at
      the front of the right-hand part.
    */
    string rhs = s.substr(split_pos + separator.size());
    return make_pair(lhs, rhs);
}
}
| 1,102 |
C++
| 21.510204 | 70 | 0.564428 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/system_windows.h
|
#ifndef UTILS_SYSTEM_WINDOWS_H
#define UTILS_SYSTEM_WINDOWS_H
#include "system.h"
#if OPERATING_SYSTEM == WINDOWS
// Avoid min/max conflicts (http://support.microsoft.com/kb/143208).
#ifndef NOMINMAX
#define NOMINMAX
#endif
/* Speed up build process by skipping some includes
(https://support.microsoft.com/de-ch/kb/166474). */
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#endif
#endif
| 435 |
C
| 17.956521 | 68 | 0.742529 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/markup.cc
|
#include "markup.h"
#include <cassert>
#include <sstream>
using namespace std;
namespace utils {
static string t2t_escape(const string &s) {
return "\"\"" + s + "\"\"";
}
static string format_authors(const vector<string> &authors) {
assert(!authors.empty());
int num_authors = authors.size();
stringstream ss;
for (int i = 0; i < num_authors; ++i) {
const string &author = authors[i];
ss << t2t_escape(author);
if (i < num_authors - 2) {
ss << ", ";
} else if (i == num_authors - 2) {
ss << " and ";
}
}
return ss.str();
}
string format_conference_reference(
const vector<string> &authors, const string &title, const string &url,
const string &conference, const string &pages, const string &publisher,
const string &year) {
ostringstream ss;
ss << "\n\n"
<< " * " << format_authors(authors) << ".<<BR>>\n"
<< " [" << t2t_escape(title) << " " << url << "].<<BR>>\n"
<< " In //" << t2t_escape(conference) << "//";
if (!pages.empty())
ss << ", pp. " << t2t_escape(pages);
ss << ". ";
if (!publisher.empty())
ss << t2t_escape(publisher) << ", ";
ss << t2t_escape(year) << ".\n\n\n";
return ss.str();
}
string format_journal_reference(
const vector<string> &authors, const string &title, const string &url,
const string &journal, const string &volume, const string &pages,
const string &year) {
ostringstream ss;
ss << "\n\n"
<< " * " << format_authors(authors) << ".<<BR>>\n"
<< " [" << t2t_escape(title) << " " << url << "].<<BR>>\n"
<< " //" << t2t_escape(journal) << "// "
<< t2t_escape(volume) << ":" << t2t_escape(pages) << ". "
<< t2t_escape(year) << ".\n\n\n";
return ss.str();
}
}
| 1,819 |
C++
| 28.836065 | 75 | 0.521715 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/system.cc
|
#include "system.h"
#include <cstdlib>
using namespace std;
namespace utils {
const char *get_exit_code_message_reentrant(ExitCode exitcode) {
switch (exitcode) {
case ExitCode::SUCCESS:
return "Solution found.";
case ExitCode::SEARCH_CRITICAL_ERROR:
return "Unexplained error occurred.";
case ExitCode::SEARCH_INPUT_ERROR:
return "Usage error occurred.";
case ExitCode::SEARCH_UNSUPPORTED:
return "Tried to use unsupported feature.";
case ExitCode::SEARCH_UNSOLVABLE:
return "Task is provably unsolvable.";
case ExitCode::SEARCH_UNSOLVED_INCOMPLETE:
return "Search stopped without finding a solution.";
case ExitCode::SEARCH_OUT_OF_MEMORY:
return "Memory limit has been reached.";
case ExitCode::SEARCH_OUT_OF_TIME:
return "Time limit has been reached.";
default:
return nullptr;
}
}
bool is_exit_code_error_reentrant(ExitCode exitcode) {
switch (exitcode) {
case ExitCode::SUCCESS:
case ExitCode::SEARCH_UNSOLVABLE:
case ExitCode::SEARCH_UNSOLVED_INCOMPLETE:
case ExitCode::SEARCH_OUT_OF_MEMORY:
case ExitCode::SEARCH_OUT_OF_TIME:
return false;
case ExitCode::SEARCH_CRITICAL_ERROR:
case ExitCode::SEARCH_INPUT_ERROR:
case ExitCode::SEARCH_UNSUPPORTED:
default:
return true;
}
}
void exit_with(ExitCode exitcode) {
report_exit_code_reentrant(exitcode);
exit(static_cast<int>(exitcode));
}
void exit_after_receiving_signal(ExitCode exitcode) {
/*
In signal handlers, we have to use the "safe function" _Exit() rather
than the unsafe function exit().
*/
report_exit_code_reentrant(exitcode);
_Exit(static_cast<int>(exitcode));
}
}
| 1,749 |
C++
| 27.688524 | 75 | 0.67753 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/rng.h
|
#ifndef UTILS_RNG_H
#define UTILS_RNG_H
#include <algorithm>
#include <cassert>
#include <random>
#include <vector>
namespace utils {
class RandomNumberGenerator {
// Mersenne Twister random number generator.
std::mt19937 rng;
public:
RandomNumberGenerator(); // Seed with a value depending on time and process ID.
explicit RandomNumberGenerator(int seed);
RandomNumberGenerator(const RandomNumberGenerator &) = delete;
RandomNumberGenerator &operator=(const RandomNumberGenerator &) = delete;
~RandomNumberGenerator();
void seed(int seed);
// Return random double in [0..1).
double operator()() {
std::uniform_real_distribution<double> distribution(0.0, 1.0);
return distribution(rng);
}
// Return random integer in [0..bound).
int operator()(int bound) {
assert(bound > 0);
std::uniform_int_distribution<int> distribution(0, bound - 1);
return distribution(rng);
}
template<typename T>
typename std::vector<T>::const_iterator choose(const std::vector<T> &vec) {
return vec.begin() + operator()(vec.size());
}
template<typename T>
typename std::vector<T>::iterator choose(std::vector<T> &vec) {
return vec.begin() + operator()(vec.size());
}
template<typename T>
void shuffle(std::vector<T> &vec) {
std::shuffle(vec.begin(), vec.end(), rng);
}
};
}
#endif
| 1,423 |
C
| 25.37037 | 83 | 0.650035 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/memory.h
|
#ifndef UTILS_MEMORY_H
#define UTILS_MEMORY_H
#include <memory>
#include <utility>
namespace utils {
/*
make_unique_ptr is a poor man's version of make_unique. Once we
require C++14, we should change all occurrences of make_unique_ptr
to make_unique.
*/
template<typename T, typename ... Args>
std::unique_ptr<T> make_unique_ptr(Args && ... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args) ...));
}
/*
Reserve some memory that we can release and be able to continue
afterwards, once we hit the memory limit. Due to memory fragmentation
the planner often doesn't have enough memory to continue if we don't
reserve enough memory. For CEGAR heuristics reserving 75 MB worked
best.
The interface assumes a single user. It is not possible for two parts
of the planner to reserve extra memory padding at the same time.
*/
extern void reserve_extra_memory_padding(int memory_in_mb);
extern void release_extra_memory_padding();
extern bool extra_memory_padding_is_reserved();
}
#endif
| 1,022 |
C
| 29.088234 | 71 | 0.736791 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/system_windows.cc
|
#include "system_windows.h"
#if OPERATING_SYSTEM == WINDOWS
// TODO: find re-entrant alternatives on Windows.
#include <csignal>
#include <ctime>
#include <iostream>
#include <process.h>
#include <psapi.h>
using namespace std;
namespace utils {
void out_of_memory_handler() {
cout << "Failed to allocate memory." << endl;
exit_with(ExitCode::SEARCH_OUT_OF_MEMORY);
}
void signal_handler(int signal_number) {
cout << "Peak memory: "
<< get_peak_memory_in_kb() << " KB" << endl;
cout << "caught signal " << signal_number
<< " -- exiting" << endl;
raise(signal_number);
}
int get_peak_memory_in_kb() {
PROCESS_MEMORY_COUNTERS_EX pmc;
bool success = GetProcessMemoryInfo(
GetCurrentProcess(),
reinterpret_cast<PROCESS_MEMORY_COUNTERS *>(&pmc),
sizeof(pmc));
if (!success) {
cerr << "warning: could not determine peak memory" << endl;
return -1;
}
return pmc.PeakPagefileUsage / 1024;
}
void register_event_handlers() {
// Terminate when running out of memory.
set_new_handler(out_of_memory_handler);
/*
On Windows, sigaction() is not available, so we use the deprecated
alternative signal(). The main difference is that signal() does not block
other signals while the signal handler is running. This can lead to race
conditions and undefined behaviour. For details, see
http://stackoverflow.com/questions/231912
*/
signal(SIGABRT, signal_handler);
signal(SIGTERM, signal_handler);
signal(SIGSEGV, signal_handler);
signal(SIGINT, signal_handler);
// SIGXCPU is not supported on Windows.
}
void report_exit_code_reentrant(ExitCode exitcode) {
const char *message = get_exit_code_message_reentrant(exitcode);
bool is_error = is_exit_code_error_reentrant(exitcode);
if (message) {
ostream &stream = is_error ? cerr : cout;
stream << message << endl;
} else {
cerr << "Exitcode: " << static_cast<int>(exitcode) << endl
<< "Unknown exitcode." << endl;
abort();
}
}
int get_process_id() {
return _getpid();
}
}
#endif
| 2,152 |
C++
| 26.253164 | 79 | 0.640799 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/system_unix.cc
|
#include "system.h"
#if OPERATING_SYSTEM == LINUX || OPERATING_SYSTEM == OSX
/*
NOTE:
Methods with the suffix "_reentrant" are meant to be used in event
handlers. They should all be "re-entrant", i.e. they must not use
static variables, global data, or locks. Only some system calls such as
open/read/write/close are guaranteed to be re-entrant. See
https://www.securecoding.cert.org/confluence/display/seccode/
SIG30-C.+Call+only+asynchronous-safe+functions+within+signal+handlers
#SIG30-C.Callonlyasynchronous-safefunctionswithinsignalhandlers-
Asynchronous-Signal-SafeFunctions
for a complete list.
We also use some low level string methods where re-entrancy is not
guaranteed but very likely with most compilers. If these ever cause
any problems, we will have to replace them by re-entrant
implementations.
See also: issue479
*/
#include "system_unix.h"
#include <csignal>
#include <cstdio>
#include <cstring>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <fstream>
#include <iostream>
#include <limits>
#include <new>
#include <stdlib.h>
#include <unistd.h>
#if OPERATING_SYSTEM == OSX
#include <mach/mach.h>
#endif
using namespace std;
namespace utils {
void write_reentrant(int filedescr, const char *message, int len) {
while (len > 0) {
int written;
do {
written = write(filedescr, message, len);
} while (written == -1 && errno == EINTR);
/*
We could check for other values of errno here but all errors except
EINTR are catastrophic enough to abort, so we do not need the
distinction.
*/
if (written == -1)
abort();
message += written;
len -= written;
}
}
void write_reentrant_str(int filedescr, const char *message) {
write_reentrant(filedescr, message, strlen(message));
}
void write_reentrant_char(int filedescr, char c) {
write_reentrant(filedescr, &c, 1);
}
void write_reentrant_int(int filedescr, int value) {
char buffer[32];
int len = snprintf(buffer, sizeof(buffer), "%d", value);
if (len < 0)
abort();
write_reentrant(filedescr, buffer, len);
}
bool read_char_reentrant(int filedescr, char *c) {
int result;
do {
result = read(filedescr, c, 1);
} while (result == -1 && errno == EINTR);
/*
We could check for other values of errno here but all errors except
EINTR are catastrophic enough to abort, so we do not need the
distinction.
*/
if (result == -1)
abort();
return result == 1;
}
void print_peak_memory_reentrant() {
#if OPERATING_SYSTEM == OSX
// TODO: Write print_peak_memory_reentrant() for OS X.
write_reentrant_str(STDOUT_FILENO, "Peak memory: ");
write_reentrant_int(STDOUT_FILENO, get_peak_memory_in_kb());
write_reentrant_str(STDOUT_FILENO, " KB\n");
#else
int proc_file_descr = TEMP_FAILURE_RETRY(open("/proc/self/status", O_RDONLY));
if (proc_file_descr == -1) {
write_reentrant_str(
STDERR_FILENO,
"critical error: could not open /proc/self/status\n");
abort();
}
const char magic[] = "\nVmPeak:";
char c;
size_t pos_magic = 0;
const size_t len_magic = sizeof(magic) - 1;
// Find magic word.
while (pos_magic != len_magic && read_char_reentrant(proc_file_descr, &c)) {
if (c == magic[pos_magic]) {
++pos_magic;
} else {
pos_magic = 0;
}
}
if (pos_magic != len_magic) {
write_reentrant_str(
STDERR_FILENO,
"critical error: could not find VmPeak in /proc/self/status\n");
abort();
}
write_reentrant_str(STDOUT_FILENO, "Peak memory: ");
// Skip over whitespace.
while (read_char_reentrant(proc_file_descr, &c) && isspace(c))
;
do {
write_reentrant_char(STDOUT_FILENO, c);
} while (read_char_reentrant(proc_file_descr, &c) && !isspace(c));
write_reentrant_str(STDOUT_FILENO, " KB\n");
/*
Ignore potential errors other than EINTR (there is nothing we can do
about I/O errors or bad file descriptors here).
*/
TEMP_FAILURE_RETRY(close(proc_file_descr));
#endif
}
#if OPERATING_SYSTEM == LINUX
void exit_handler(int, void *) {
#elif OPERATING_SYSTEM == OSX
void exit_handler() {
#endif
print_peak_memory_reentrant();
}
void out_of_memory_handler() {
/*
We do not use any memory padding currently. The methods below should
only use stack memory. If we ever run into situations where the stack
memory is not sufficient, we can consider using sigaltstack to reserve
memory for the stack of the signal handler and raising a signal here.
*/
write_reentrant_str(STDOUT_FILENO, "Failed to allocate memory.\n");
exit_with(ExitCode::SEARCH_OUT_OF_MEMORY);
}
void signal_handler(int signal_number) {
print_peak_memory_reentrant();
write_reentrant_str(STDOUT_FILENO, "caught signal ");
write_reentrant_int(STDOUT_FILENO, signal_number);
write_reentrant_str(STDOUT_FILENO, " -- exiting\n");
if (signal_number == SIGXCPU) {
exit_after_receiving_signal(ExitCode::SEARCH_OUT_OF_TIME);
}
raise(signal_number);
}
/*
NOTE: we have two variants of obtaining peak memory information.
get_peak_memory_in_kb() is used during the regular execution.
print_peak_memory_in_kb_reentrant() is used in signal handlers.
The latter is slower but guarantees reentrancy.
*/
int get_peak_memory_in_kb() {
// On error, produces a warning on cerr and returns -1.
int memory_in_kb = -1;
#if OPERATING_SYSTEM == OSX
// Based on http://stackoverflow.com/questions/63166
task_basic_info t_info;
mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
if (task_info(mach_task_self(), TASK_BASIC_INFO,
reinterpret_cast<task_info_t>(&t_info),
&t_info_count) == KERN_SUCCESS) {
memory_in_kb = t_info.virtual_size / 1024;
}
#else
ifstream procfile;
procfile.open("/proc/self/status");
string word;
while (procfile.good()) {
procfile >> word;
if (word == "VmPeak:") {
procfile >> memory_in_kb;
break;
}
// Skip to end of line.
procfile.ignore(numeric_limits<streamsize>::max(), '\n');
}
if (procfile.fail())
memory_in_kb = -1;
#endif
if (memory_in_kb == -1)
cerr << "warning: could not determine peak memory" << endl;
return memory_in_kb;
}
void register_event_handlers() {
// Terminate when running out of memory.
set_new_handler(out_of_memory_handler);
// On exit or when receiving certain signals such as SIGINT (Ctrl-C),
// print the peak memory usage.
#if OPERATING_SYSTEM == LINUX
on_exit(exit_handler, 0);
#elif OPERATING_SYSTEM == OSX
atexit(exit_handler);
#endif
struct sigaction default_signal_action;
default_signal_action.sa_handler = signal_handler;
// Block all signals we handle while one of them is handled.
sigemptyset(&default_signal_action.sa_mask);
sigaddset(&default_signal_action.sa_mask, SIGABRT);
sigaddset(&default_signal_action.sa_mask, SIGTERM);
sigaddset(&default_signal_action.sa_mask, SIGSEGV);
sigaddset(&default_signal_action.sa_mask, SIGINT);
sigaddset(&default_signal_action.sa_mask, SIGXCPU);
// Reset handler to default action after completion.
default_signal_action.sa_flags = SA_RESETHAND;
sigaction(SIGABRT, &default_signal_action, 0);
sigaction(SIGTERM, &default_signal_action, 0);
sigaction(SIGSEGV, &default_signal_action, 0);
sigaction(SIGINT, &default_signal_action, 0);
sigaction(SIGXCPU, &default_signal_action, 0);
}
void report_exit_code_reentrant(ExitCode exitcode) {
const char *message = get_exit_code_message_reentrant(exitcode);
bool is_error = is_exit_code_error_reentrant(exitcode);
if (message) {
int filedescr = is_error ? STDERR_FILENO : STDOUT_FILENO;
write_reentrant_str(filedescr, message);
write_reentrant_char(filedescr, '\n');
} else {
write_reentrant_str(STDERR_FILENO, "Exitcode: ");
write_reentrant_int(STDERR_FILENO, static_cast<int>(exitcode));
write_reentrant_str(STDERR_FILENO, "\nUnknown exitcode.\n");
abort();
}
}
int get_process_id() {
return getpid();
}
}
#endif
| 8,452 |
C++
| 29.96337 | 82 | 0.646238 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/math.cc
|
#include "math.h"
#include <cassert>
#include <cstdlib>
#include <limits>
using namespace std;
namespace utils {
/* Test if factor1 * factor2 <= limit without computing the product,
   which makes the check safe against overflow. The caller must
   guarantee 0 <= factor1, factor2, limit. */
bool is_product_within_limit(int factor1, int factor2, int limit) {
    assert(factor1 >= 0);
    assert(factor2 >= 0);
    assert(limit >= 0);
    if (factor2 == 0)
        return true;
    return factor1 <= limit / factor2;
}
static bool is_product_within_limit_unsigned(
unsigned int factor1, unsigned int factor2, unsigned int limit) {
return factor2 == 0 || factor1 <= limit / factor2;
}
// Absolute value of x as an unsigned int. Handles INT_MIN explicitly,
// since calling abs() on it would overflow.
static unsigned int safe_abs(int x) {
    if (x != std::numeric_limits<int>::min())
        return std::abs(x);
    return static_cast<unsigned int>(-(x + 1)) + 1u;
}
bool is_product_within_limits(
int factor1, int factor2, int lower_limit, int upper_limit) {
assert(lower_limit < 0);
assert(upper_limit >= 0);
if (factor1 >= 0 && factor2 >= 0) {
return is_product_within_limit(factor1, factor2, upper_limit);
} else if (factor1 < 0 && factor2 < 0) {
return is_product_within_limit_unsigned(
safe_abs(factor1), safe_abs(factor2), upper_limit);
} else {
return is_product_within_limit_unsigned(
safe_abs(factor1), safe_abs(factor2), safe_abs(lower_limit));
}
}
}
| 1,282 |
C++
| 26.891304 | 73 | 0.628705 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/rng.cc
|
#include "rng.h"
#include "system.h"
#include <chrono>
using namespace std;
namespace utils {
/*
Ideally, one would use true randomness here from std::random_device. However,
there exist platforms where this returns non-random data, which is condoned by
the standard. On these platforms one would need to additionally seed with time
and process ID (PID), and therefore generally seeding with time and PID only
is probably good enough.
*/
RandomNumberGenerator::RandomNumberGenerator() {
unsigned int secs = static_cast<unsigned int>(
chrono::system_clock::now().time_since_epoch().count());
seed(secs + get_process_id());
}
RandomNumberGenerator::RandomNumberGenerator(int seed_) {
seed(seed_);
}
RandomNumberGenerator::~RandomNumberGenerator() {
}
void RandomNumberGenerator::seed(int seed) {
rng.seed(seed);
}
}
| 855 |
C++
| 24.17647 | 80 | 0.730994 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/strings.h
|
#ifndef UTILS_STRINGS_H
#define UTILS_STRINGS_H
#include "exceptions.h"
#include <sstream>
#include <string>
namespace utils {
class StringOperationError : public utils::Exception {
std::string msg;
public:
explicit StringOperationError(const std::string &msg);
virtual void print() const override;
};
extern void lstrip(std::string &s);
extern void rstrip(std::string &s);
extern void strip(std::string &s);
/*
Split a given string at the first occurrence of separator or throw
StringOperationError if separator is not found.
*/
extern std::pair<std::string, std::string> split(
const std::string &s, const std::string &separator);
extern bool startswith(const std::string &s, const std::string &prefix);
/*
  Concatenate the items of collection into one string, separating
  consecutive items with delimiter.
*/
template<typename Collection>
std::string join(const Collection &collection, const std::string &delimiter) {
    std::ostringstream result;
    auto it = std::begin(collection);
    const auto last = std::end(collection);
    if (it != last) {
        // Write the first item as-is, then prefix every further item
        // with the delimiter.
        result << *it;
        for (++it; it != last; ++it) {
            result << delimiter << *it;
        }
    }
    return result.str();
}
}
#endif
| 1,101 |
C
| 22.446808 | 78 | 0.681199 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/timer.h
|
#ifndef UTILS_TIMER_H
#define UTILS_TIMER_H
#include "system.h"
#include <ostream>
namespace utils {
// Thin wrapper around a time span measured in seconds; implicitly
// convertible to double so durations can be used in arithmetic.
class Duration {
    double seconds;
public:
    explicit Duration(double seconds) : seconds(seconds) {}
    operator double() const {
        return seconds;
    }
};
// Formatted output of a duration (definition in timer.cc).
std::ostream &operator<<(std::ostream &os, const Duration &time);
// Stopwatch-style timer that can be stopped, resumed and reset;
// operator() reports the measured time (semantics in timer.cc).
class Timer {
    double last_start_clock;
    double collected_time;
    bool stopped;
#if OPERATING_SYSTEM == WINDOWS
    // Windows has no clock()-style API with the needed resolution, so the
    // implementation uses QueryPerformanceCounter state instead.
    LARGE_INTEGER frequency;
    LARGE_INTEGER start_ticks;
#endif
    double current_clock() const;
public:
    // By default the timer starts running immediately on construction.
    explicit Timer(bool start = true);
    ~Timer() = default;
    Duration operator()() const;
    Duration stop();
    void resume();
    Duration reset();
};
std::ostream &operator<<(std::ostream &os, const Timer &timer);
// Global timer instance (defined in timer.cc).
extern Timer g_timer;
}
#endif
| 829 |
C
| 17.444444 | 65 | 0.665862 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/utils/countdown_timer.h
|
#ifndef UTILS_COUNTDOWN_TIMER_H
#define UTILS_COUNTDOWN_TIMER_H
#include "timer.h"
namespace utils {
// Countdown over a fixed time budget of max_time seconds, built on Timer;
// used to enforce time limits.
class CountdownTimer {
    Timer timer;
    double max_time;
public:
    explicit CountdownTimer(double max_time);
    ~CountdownTimer();
    bool is_expired() const;
    Duration get_elapsed_time() const;
    Duration get_remaining_time() const;
};
}
#endif
| 363 |
C
| 17.199999 | 45 | 0.702479 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_precomputed.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_PRECOMPUTED_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_PRECOMPUTED_H
#include "merge_strategy_factory.h"
namespace options {
class Options;
}
namespace merge_and_shrink {
class MergeTreeFactory;
/*
  Merge strategy factory for precomputed merge strategies: the merge order
  is read off a merge tree produced by the given MergeTreeFactory.
*/
class MergeStrategyFactoryPrecomputed : public MergeStrategyFactory {
    std::shared_ptr<MergeTreeFactory> merge_tree_factory;
protected:
    virtual std::string name() const override;
    virtual void dump_strategy_specific_options() const override;
public:
    explicit MergeStrategyFactoryPrecomputed(options::Options &options);
    virtual ~MergeStrategyFactoryPrecomputed() override = default;
    virtual std::unique_ptr<MergeStrategy> compute_merge_strategy(
        const TaskProxy &task_proxy,
        const FactoredTransitionSystem &fts) override;
    virtual bool requires_init_distances() const override;
    virtual bool requires_goal_distances() const override;
};
}
#endif
| 943 |
C
| 31.551723 | 72 | 0.776246 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_H
#include <utility>
namespace merge_and_shrink {
class FactoredTransitionSystem;
/*
A merge strategy dictates the order in which transition systems of the
factored transition system maintained by the merge-and-shrink heuristic
should be merged.
We distinguish three types of merge strategies: a stateless type, a
precomputed type, and a third unspecified type, which does not fit
either category.
Stateless merge strategies: they do not store any information and determine
  the next merge solely based on the current factored transition system.
Precomputed merge strategies: they are represented in the form of a merge
tree (see class MergeTree). They return the next merge based on that
precomputed tree. This requires that the actual merge performed always
matches the one dictated by the precomputed strategy, i.e. it is mandatory
that the merge tree maintained by the precomputed strategy remains
synchronized with the current factored transition system.
Special merge strategies: these do not fit either of the other categories
and are usually a combination of existing stateless and/or precomputed merge
strategies. For example, the SCCs merge strategy (Sievers et al, ICAPS 2016)
needs to know which of the SCCs have been merged. While merging all variables
within an SCC, it makes use of a stateless merge strategy or a merge tree,
until that SCC has been entirely processed. There is currently no such merge
strategy in the Fast Downward repository.
NOTE: While stateless merge strategies have full control over the merge
order, this is not true for the specific implementation of merge tree,
because we always perform the next "left-most" merge in the merge tree.
See also the documentation in merge_tree.h.
*/
class MergeStrategy {
protected:
    // The factored transition system the strategy operates on; the pair
    // returned by get_next() refers to indices of this FTS.
    const FactoredTransitionSystem &fts;
public:
    explicit MergeStrategy(const FactoredTransitionSystem &fts);
    virtual ~MergeStrategy() = default;
    // Return the indices of the next two factors to be merged.
    virtual std::pair<int, int> get_next() = 0;
};
}
#endif
| 2,108 |
C
| 39.557692 | 79 | 0.781309 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_fh.h
|
#ifndef MERGE_AND_SHRINK_SHRINK_FH_H
#define MERGE_AND_SHRINK_SHRINK_FH_H
#include "shrink_bucket_based.h"
#include <vector>
namespace options {
class Options;
}
namespace merge_and_shrink {
/*
NOTE: In case where we must merge across buckets (i.e. when
the number of (f, h) pairs is larger than the number of
permitted abstract states), this shrink strategy will *not* make
an effort to be at least be h-preserving.
This could be improved, but not without complicating the code.
Usually we set the number of abstract states large enough that we
do not need to merge across buckets. Therefore the complication
might not be worth the code maintenance cost.
*/
/*
  Bucket-based shrink strategy that partitions abstract states by their
  (f, h) values; f_start and h_start control whether high or low values
  are considered first. See the note above about merging across buckets.
*/
class ShrinkFH : public ShrinkBucketBased {
public:
    enum class HighLow {HIGH, LOW};
private:
    const HighLow f_start;
    const HighLow h_start;
    // Bucket computation indexed by (f, h) value; requires the maximum
    // finite f and h values.
    std::vector<Bucket> ordered_buckets_use_vector(
        const TransitionSystem &ts,
        const Distances &distances,
        int max_f,
        int max_h) const;
    // Map-based bucket computation; does not need the value ranges.
    std::vector<Bucket> ordered_buckets_use_map(
        const TransitionSystem &ts,
        const Distances &distances) const;
protected:
    virtual std::string name() const override;
    virtual void dump_strategy_specific_options() const override;
    virtual std::vector<Bucket> partition_into_buckets(
        const TransitionSystem &ts,
        const Distances &distances) const override;
public:
    explicit ShrinkFH(const options::Options &opts);
    virtual ~ShrinkFH() override = default;
    // (f, h) partitioning needs both init and goal distances.
    virtual bool requires_init_distances() const override {
        return true;
    }
    virtual bool requires_goal_distances() const override {
        return true;
    }
};
}
#endif
| 1,694 |
C
| 25.904761 | 67 | 0.70425 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_H
#include <string>
#include <vector>
class TaskProxy;
namespace merge_and_shrink {
class FactoredTransitionSystem;
/*
  Base class for scoring functions used by score-based merge strategies:
  compute_scores assigns one score per given merge candidate.
*/
class MergeScoringFunction {
protected:
    // Set to true by initialize(); lets implementations check that
    // task-dependent data has been computed before scoring.
    bool initialized;
    virtual std::string name() const = 0;
    virtual void dump_function_specific_options() const {}
public:
    MergeScoringFunction();
    virtual ~MergeScoringFunction() = default;
    virtual std::vector<double> compute_scores(
        const FactoredTransitionSystem &fts,
        const std::vector<std::pair<int, int>> &merge_candidates) = 0;
    virtual bool requires_init_distances() const = 0;
    virtual bool requires_goal_distances() const = 0;
    // Overriding methods must set initialized to true.
    virtual void initialize(const TaskProxy &) {
        initialized = true;
    }
    void dump_options() const;
};
}
#endif
| 920 |
C
| 25.314285 | 70 | 0.71087 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_H
#include <memory>
#include <string>
class TaskProxy;
namespace merge_and_shrink {
class FactoredTransitionSystem;
class MergeStrategy;
/*
  Base class of factories that create a MergeStrategy for a given task and
  factored transition system.
*/
class MergeStrategyFactory {
protected:
    virtual std::string name() const = 0;
    virtual void dump_strategy_specific_options() const = 0;
public:
    MergeStrategyFactory() = default;
    virtual ~MergeStrategyFactory() = default;
    void dump_options() const;
    virtual std::unique_ptr<MergeStrategy> compute_merge_strategy(
        const TaskProxy &task_proxy,
        const FactoredTransitionSystem &fts) = 0;
    // Whether the created strategy needs init/goal distances to be
    // maintained by the merge-and-shrink computation.
    virtual bool requires_init_distances() const = 0;
    virtual bool requires_goal_distances() const = 0;
};
}
#endif
| 781 |
C
| 25.066666 | 66 | 0.732394 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_random.h
|
#ifndef MERGE_AND_SHRINK_SHRINK_RANDOM_H
#define MERGE_AND_SHRINK_SHRINK_RANDOM_H
#include "shrink_bucket_based.h"
namespace options {
class Options;
}
namespace merge_and_shrink {
// Shrink strategy with random bucketing (see shrink_random.cc); requires
// neither init nor goal distances.
class ShrinkRandom : public ShrinkBucketBased {
protected:
    virtual std::vector<Bucket> partition_into_buckets(
        const TransitionSystem &ts,
        const Distances &distances) const override;
    virtual std::string name() const override;
    void dump_strategy_specific_options() const override {}
public:
    explicit ShrinkRandom(const options::Options &opts);
    virtual ~ShrinkRandom() override = default;
    virtual bool requires_init_distances() const override {
        return false;
    }
    virtual bool requires_goal_distances() const override {
        return false;
    }
};
}
#endif
| 799 |
C
| 22.529411 | 59 | 0.71214 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/transition_system.h
|
#ifndef MERGE_AND_SHRINK_TRANSITION_SYSTEM_H
#define MERGE_AND_SHRINK_TRANSITION_SYSTEM_H
#include "types.h"
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class Distances;
class LabelEquivalenceRelation;
class LabelGroup;
class Labels;
// A single transition of an abstract transition system, leading from state
// 'src' to state 'target'.
struct Transition {
    int src;
    int target;

    Transition(int src, int target)
        : src(src), target(target) {
    }

    bool operator==(const Transition &other) const {
        return src == other.src && target == other.target;
    }

    // Lexicographic order on (src, target).
    bool operator<(const Transition &other) const {
        if (src != other.src)
            return src < other.src;
        return target < other.target;
    }

    // Required for "is_sorted_unique" in utilities
    bool operator>=(const Transition &other) const {
        return !(*this < other);
    }
};
// Lightweight view pairing a label group with that group's transitions;
// yielded by TSConstIterator when iterating over a TransitionSystem.
struct GroupAndTransitions {
    const LabelGroup &label_group;
    const std::vector<Transition> &transitions;
    GroupAndTransitions(const LabelGroup &label_group,
                        const std::vector<Transition> &transitions)
        : label_group(label_group),
          transitions(transitions) {
    }
};
class TSConstIterator {
    /*
      This class allows users to easily iterate over both label groups and
      their transitions of a TransitionSystem. Most importantly, it hides
      the data structure used by LabelEquivalenceRelation, which could be
      easily exchanged.
    */
    const LabelEquivalenceRelation &label_equivalence_relation;
    const std::vector<std::vector<Transition>> &transitions_by_group_id;
    // current_group_id is the actual iterator
    int current_group_id;
    // Advance current_group_id to the next valid group index.
    void next_valid_index();
public:
    TSConstIterator(const LabelEquivalenceRelation &label_equivalence_relation,
                    const std::vector<std::vector<Transition>> &transitions_by_group_id,
                    bool end);
    void operator++();
    GroupAndTransitions operator*() const;
    // Iterators compare equal iff they point at the same group.
    bool operator==(const TSConstIterator &rhs) const {
        return current_group_id == rhs.current_group_id;
    }
    bool operator!=(const TSConstIterator &rhs) const {
        return current_group_id != rhs.current_group_id;
    }
};
class TransitionSystem {
private:
    /*
      The following two attributes are only used for output.
      - num_variables: total number of variables in the factored
        transition system
      - incorporated_variables: variables that contributed to this
        transition system
    */
    const int num_variables;
    std::vector<int> incorporated_variables;
    /*
      All locally equivalent labels are grouped together, and their
      transitions are only stored once for every such group, see below.
      LabelEquivalenceRelation stores the equivalence relation over all
      labels of the underlying factored transition system.
    */
    std::unique_ptr<LabelEquivalenceRelation> label_equivalence_relation;
    /*
      The transitions of a label group are indexed via its ID. The ID of a
      group does not change, and hence its transitions are never moved.
      We tested different alternatives to store the transitions, but they all
      performed worse: storing a vector transitions in the label group increases
      memory usage and runtime; storing the transitions more compactly and
      incrementally increasing the size of transitions_of_groups whenever a
      new label group is added also increases runtime. See also issue492 and
      issue521.
    */
    std::vector<std::vector<Transition>> transitions_by_group_id;
    // Number of abstract states; returned by get_size().
    int num_states;
    // goal_states[s] is true iff abstract state s is a goal state.
    std::vector<bool> goal_states;
    // Index of the abstract initial state; returned by get_init_state().
    int init_state;
    /*
      Check if two or more labels are locally equivalent to each other, and
      if so, update the label equivalence relation.
    */
    void compute_locally_equivalent_labels();
    const std::vector<Transition> &get_transitions_for_group_id(int group_id) const {
        return transitions_by_group_id[group_id];
    }
    // Statistics and output
    int compute_total_transitions() const;
    std::string get_description() const;
public:
    TransitionSystem(
        int num_variables,
        std::vector<int> &&incorporated_variables,
        std::unique_ptr<LabelEquivalenceRelation> &&label_equivalence_relation,
        std::vector<std::vector<Transition>> &&transitions_by_group_id,
        int num_states,
        std::vector<bool> &&goal_states,
        int init_state);
    TransitionSystem(const TransitionSystem &other);
    ~TransitionSystem();
    /*
      Factory method to construct the merge of two transition systems.
      Invariant: the children ts1 and ts2 must be solvable.
      (It is a bug to merge an unsolvable transition system.)
    */
    static std::unique_ptr<TransitionSystem> merge(
        const Labels &labels,
        const TransitionSystem &ts1,
        const TransitionSystem &ts2,
        utils::Verbosity verbosity);
    /*
      Applies the given state equivalence relation to the transition system.
      abstraction_mapping is a mapping from old states to new states, and it
      must be consistent with state_equivalence_relation in the sense that
      old states are only mapped to the same new state if they are in the same
      equivalence class as specified in state_equivalence_relation.
    */
    void apply_abstraction(
        const StateEquivalenceRelation &state_equivalence_relation,
        const std::vector<int> &abstraction_mapping,
        utils::Verbosity verbosity);
    /*
      Applies the given label mapping, mapping old to new label numbers. This
      updates the label equivalence relation which is internally used to group
      locally equivalent labels and store their transitions only once.
    */
    void apply_label_reduction(
        const std::vector<std::pair<int, std::vector<int>>> &label_mapping,
        bool only_equivalent_labels);
    // Iteration over label groups and their transitions (see TSConstIterator).
    TSConstIterator begin() const {
        return TSConstIterator(*label_equivalence_relation,
                               transitions_by_group_id,
                               false);
    }
    TSConstIterator end() const {
        return TSConstIterator(*label_equivalence_relation,
                               transitions_by_group_id,
                               true);
    }
    /*
      Method to identify the transition system in output.
      Print "Atomic transition system #x: " for atomic transition systems,
      where x is the variable. For composite transition systems, print
      "Transition system (x/y): " for a transition system containing x
      out of y variables.
    */
    std::string tag() const;
    /*
      The transitions for every group of locally equivalent labels are
      sorted (by source, by target) and there are no duplicates.
    */
    bool are_transitions_sorted_unique() const;
    bool in_sync_with_label_equivalence_relation() const;
    bool is_solvable(const Distances &distances) const;
    void dump_dot_graph() const;
    void dump_labels_and_transitions() const;
    void statistics() const;
    int get_size() const {
        return num_states;
    }
    int get_init_state() const {
        return init_state;
    }
    bool is_goal_state(int state) const {
        return goal_states[state];
    }
    const std::vector<int> &get_incorporated_variables() const {
        return incorporated_variables;
    }
};
| 7,430 |
C
| 31.030172 | 88 | 0.671063 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/label_reduction.cc
|
#include "label_reduction.h"
#include "factored_transition_system.h"
#include "label_equivalence_relation.h"
#include "labels.h"
#include "transition_system.h"
#include "types.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../task_proxy.h"
#include "../algorithms/equivalence_relation.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include "../utils/system.h"
#include <cassert>
#include <iostream>
#include <string>
#include <unordered_map>
using namespace std;
using utils::ExitCode;
namespace merge_and_shrink {
// Read all label reduction settings from the given options object.
LabelReduction::LabelReduction(const Options &options)
    : lr_before_shrinking(options.get<bool>("before_shrinking")),
      lr_before_merging(options.get<bool>("before_merging")),
      lr_method(options.get<LabelReductionMethod>("method")),
      lr_system_order(options.get<LabelReductionSystemOrder>("system_order")),
      rng(utils::parse_rng_from_options(options)) {
}
// The transition system order is only filled by initialize(), so a
// non-empty order means initialize() has been called.
bool LabelReduction::initialized() const {
    return !transition_system_order.empty();
}
// Precompute the (fixed) order in which transition systems are considered
// for label reduction, according to lr_system_order.
void LabelReduction::initialize(const TaskProxy &task_proxy) {
    assert(!initialized());
    // Compute the transition system order.
    /*
      With n variables, the merge-and-shrink computation creates at most
      2n - 1 transition systems, hence indices 0 .. 2n - 2.
    */
    size_t max_transition_system_count = task_proxy.get_variables().size() * 2 - 1;
    transition_system_order.reserve(max_transition_system_count);
    if (lr_system_order == LabelReductionSystemOrder::REGULAR
        || lr_system_order == LabelReductionSystemOrder::RANDOM) {
        // Start with ascending order; RANDOM additionally shuffles it.
        for (size_t i = 0; i < max_transition_system_count; ++i)
            transition_system_order.push_back(i);
        if (lr_system_order == LabelReductionSystemOrder::RANDOM) {
            rng->shuffle(transition_system_order);
        }
    } else {
        // REVERSE: descending order of indices.
        assert(lr_system_order == LabelReductionSystemOrder::REVERSE);
        for (size_t i = 0; i < max_transition_system_count; ++i)
            transition_system_order.push_back(max_transition_system_count - 1 - i);
    }
}
/*
  Turn the given equivalence relation over labels into a label mapping:
  within each block of the relation, all current labels of equal cost are
  mapped to a fresh label number (appended after the existing labels).
  Only groups with more than one label produce an entry in label_mapping.
*/
void LabelReduction::compute_label_mapping(
    const equivalence_relation::EquivalenceRelation *relation,
    const FactoredTransitionSystem &fts,
    vector<pair<int, vector<int>>> &label_mapping,
    utils::Verbosity verbosity) const {
    const Labels &labels = fts.get_labels();
    // New labels are numbered starting after all existing labels.
    int next_new_label_no = labels.get_size();
    int num_labels = 0;
    int num_labels_after_reduction = 0;
    for (auto group_it = relation->begin();
         group_it != relation->end(); ++group_it) {
        const equivalence_relation::Block &block = *group_it;
        // Group the block's current labels by their cost: only labels of
        // equal cost may be combined.
        unordered_map<int, vector<int>> equivalent_label_nos;
        for (auto label_it = block.begin();
             label_it != block.end(); ++label_it) {
            assert(*label_it < next_new_label_no);
            int label_no = *label_it;
            if (labels.is_current_label(label_no)) {
                // only consider non-reduced labels
                int cost = labels.get_label_cost(label_no);
                equivalent_label_nos[cost].push_back(label_no);
                ++num_labels;
            }
        }
        for (auto it = equivalent_label_nos.begin();
             it != equivalent_label_nos.end(); ++it) {
            const vector<int> &label_nos = it->second;
            if (label_nos.size() > 1) {
                if (verbosity >= utils::Verbosity::DEBUG) {
                    utils::g_log << "Reducing labels " << label_nos << " to " << next_new_label_no << endl;
                }
                label_mapping.push_back(make_pair(next_new_label_no, label_nos));
                ++next_new_label_no;
            }
            if (!label_nos.empty()) {
                ++num_labels_after_reduction;
            }
        }
    }
    int number_reduced_labels = num_labels - num_labels_after_reduction;
    if (verbosity >= utils::Verbosity::VERBOSE && number_reduced_labels > 0) {
        utils::g_log << "Label reduction: "
                     << num_labels << " labels, "
                     << num_labels_after_reduction << " after reduction"
                     << endl;
    }
}
// Note: the caller owns the returned relation and must delete it.
equivalence_relation::EquivalenceRelation
*LabelReduction::compute_combinable_equivalence_relation(
    int ts_index,
    const FactoredTransitionSystem &fts) const {
    /*
      Returns an equivalence relation over labels s.t. l ~ l'
      iff l and l' are locally equivalent in all transition systems
      T' \neq T. (They may or may not be locally equivalent in T.)
    */
    // Create the equivalence relation where all labels are equivalent.
    const Labels &labels = fts.get_labels();
    int num_labels = labels.get_size();
    vector<pair<int, int>> annotated_labels;
    annotated_labels.reserve(num_labels);
    for (int label_no = 0; label_no < num_labels; ++label_no) {
        if (labels.is_current_label(label_no)) {
            // Annotation 0 for all labels: start with one single block.
            annotated_labels.push_back(make_pair(0, label_no));
        }
    }
    equivalence_relation::EquivalenceRelation *relation =
        equivalence_relation::EquivalenceRelation::from_annotated_elements<int>(
            num_labels, annotated_labels);
    // Refine the relation with the locally equivalent label groups of
    // every factor other than ts_index.
    for (int index : fts) {
        if (index != ts_index) {
            const TransitionSystem &ts = fts.get_transition_system(index);
            for (GroupAndTransitions gat : ts) {
                const LabelGroup &label_group = gat.label_group;
                relation->refine(label_group.begin(), label_group.end());
            }
        }
    }
    return relation;
}
/*
  Try to reduce labels of the given factored transition system. Returns
  true iff at least one label mapping was applied. Depending on lr_method,
  either only the two factors of next_merge are considered
  (TWO_TRANSITION_SYSTEMS), or all factors, possibly until a fixpoint is
  reached.
*/
bool LabelReduction::reduce(
    const pair<int, int> &next_merge,
    FactoredTransitionSystem &fts,
    utils::Verbosity verbosity) const {
    assert(initialized());
    assert(reduce_before_shrinking() || reduce_before_merging());
    int num_transition_systems = fts.get_size();
    if (lr_method == LabelReductionMethod::TWO_TRANSITION_SYSTEMS) {
        /*
          Note:
          We compute the combinable relation for labels for the two transition
          systems in the order given by the merge strategy. We conducted
          experiments testing the impact of always starting with the larger
          transitions system (in terms of variables) or with the smaller
          transition system and found no significant differences.
        */
        assert(fts.is_active(next_merge.first));
        assert(fts.is_active(next_merge.second));
        bool reduced = false;
        equivalence_relation::EquivalenceRelation *relation =
            compute_combinable_equivalence_relation(next_merge.first, fts);
        vector<pair<int, vector<int>>> label_mapping;
        compute_label_mapping(relation, fts, label_mapping, verbosity);
        if (!label_mapping.empty()) {
            fts.apply_label_mapping(label_mapping, next_merge.first);
            reduced = true;
        }
        delete relation;
        relation = nullptr;
        // Reuse label_mapping for the second factor.
        utils::release_vector_memory(label_mapping);
        relation = compute_combinable_equivalence_relation(
            next_merge.second,
            fts);
        compute_label_mapping(relation, fts, label_mapping, verbosity);
        if (!label_mapping.empty()) {
            fts.apply_label_mapping(label_mapping, next_merge.second);
            reduced = true;
        }
        delete relation;
        return reduced;
    }
    /* Make sure that we start with an index not out of range for
       all_transition_systems. */
    size_t tso_index = 0;
    assert(!transition_system_order.empty());
    while (transition_system_order[tso_index] >= num_transition_systems) {
        ++tso_index;
        assert(utils::in_bounds(tso_index, transition_system_order));
    }
    int max_iterations;
    if (lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS) {
        max_iterations = num_transition_systems;
    } else if (lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT) {
        max_iterations = INF;
    } else {
        ABORT("unknown label reduction method");
    }
    int num_unsuccessful_iterations = 0;
    bool reduced = false;
    /*
      If using ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT, this loop stops under
      the following conditions: if there are no combinable labels for all
      transition systems, we have num_unsuccessful_iterations =
      num_transition_systems and break the loop.
      Whenever there is a transition system for which we reduce labels, we
      reset the counter num_unsuccessful_iterations to 1 (not to 0!) because
      we only need to consider all remaining transitions systems, but not the
      one itself again.
    */
    for (int i = 0; i < max_iterations; ++i) {
        int ts_index = transition_system_order[tso_index];
        vector<pair<int, vector<int>>> label_mapping;
        if (fts.is_active(ts_index)) {
            equivalence_relation::EquivalenceRelation *relation =
                compute_combinable_equivalence_relation(ts_index, fts);
            compute_label_mapping(relation, fts, label_mapping, verbosity);
            delete relation;
        }
        if (label_mapping.empty()) {
            /*
              Even if the index is inactive, we need to count it as
              unsuccessful iterations, because the number of indices, i.e.
              the size of the vector in the factored transition system
              matters.
            */
            ++num_unsuccessful_iterations;
        } else {
            reduced = true;
            // See comment for the loop and its exit conditions.
            num_unsuccessful_iterations = 1;
            fts.apply_label_mapping(label_mapping, ts_index);
        }
        if (num_unsuccessful_iterations == num_transition_systems) {
            // See comment for the loop and its exit conditions.
            break;
        }
        // Advance tso_index cyclically, skipping indices that are out of
        // range for the current number of transition systems.
        ++tso_index;
        if (tso_index == transition_system_order.size()) {
            tso_index = 0;
        }
        while (transition_system_order[tso_index] >= num_transition_systems) {
            ++tso_index;
            if (tso_index == transition_system_order.size()) {
                tso_index = 0;
            }
        }
    }
    return reduced;
}
// Print all label reduction settings to the log.
void LabelReduction::dump_options() const {
    utils::g_log << "Label reduction options:" << endl;
    utils::g_log << "Before merging: "
                 << (lr_before_merging ? "enabled" : "disabled") << endl;
    utils::g_log << "Before shrinking: "
                 << (lr_before_shrinking ? "enabled" : "disabled") << endl;

    // Describe the chosen reduction method.
    const char *method_description = "";
    if (lr_method == LabelReductionMethod::TWO_TRANSITION_SYSTEMS) {
        method_description = "two transition systems (which will be merged next)";
    } else if (lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS) {
        method_description = "all transition systems";
    } else if (lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT) {
        method_description = "all transition systems with fixpoint computation";
    }
    utils::g_log << "Method: " << method_description << endl;

    // The system order is only relevant for the methods that iterate over
    // all transition systems.
    bool iterates_over_all_systems =
        lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS ||
        lr_method == LabelReductionMethod::ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT;
    if (iterates_over_all_systems) {
        const char *order_description = "";
        if (lr_system_order == LabelReductionSystemOrder::REGULAR) {
            order_description = "regular";
        } else if (lr_system_order == LabelReductionSystemOrder::REVERSE) {
            order_description = "reversed";
        } else if (lr_system_order == LabelReductionSystemOrder::RANDOM) {
            order_description = "random";
        }
        utils::g_log << "System order: " << order_description << endl;
    }
}
// Option parser factory for the "exact" label reduction plugin.
static shared_ptr<LabelReduction>_parse(OptionParser &parser) {
    parser.document_synopsis(
        "Exact generalized label reduction",
        "This class implements the exact generalized label reduction "
        "described in the following paper:" +
        utils::format_conference_reference(
            {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
            "Generalized Label Reduction for Merge-and-Shrink Heuristics",
            "https://ai.dmi.unibas.ch/papers/sievers-et-al-aaai2014.pdf",
            "Proceedings of the 28th AAAI Conference on Artificial"
            " Intelligence (AAAI 2014)",
            "2358-2366",
            "AAAI Press",
            "2014"));
    parser.add_option<bool>("before_shrinking",
                            "apply label reduction before shrinking");
    parser.add_option<bool>("before_merging",
                            "apply label reduction before merging");
    // Enum option "method": names and matching documentation strings.
    vector<string> label_reduction_method;
    vector<string> label_reduction_method_doc;
    label_reduction_method.push_back("TWO_TRANSITION_SYSTEMS");
    label_reduction_method_doc.push_back(
        "compute the 'combinable relation' only for the two transition "
        "systems being merged next");
    label_reduction_method.push_back("ALL_TRANSITION_SYSTEMS");
    label_reduction_method_doc.push_back(
        "compute the 'combinable relation' for labels once for every "
        "transition system and reduce labels");
    label_reduction_method.push_back("ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT");
    label_reduction_method_doc.push_back(
        "keep computing the 'combinable relation' for labels iteratively "
        "for all transition systems until no more labels can be reduced");
    parser.add_enum_option<LabelReductionMethod>(
        "method",
        label_reduction_method,
        "Label reduction method. See the AAAI14 paper by "
        "Sievers et al. for explanation of the default label "
        "reduction method and the 'combinable relation' ."
        "Also note that you must set at least one of the "
        "options reduce_labels_before_shrinking or "
        "reduce_labels_before_merging in order to use "
        "the chosen label reduction configuration.",
        "ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT",
        label_reduction_method_doc);
    // Enum option "system_order": names and matching documentation strings.
    vector<string> label_reduction_system_order;
    vector<string> label_reduction_system_order_doc;
    label_reduction_system_order.push_back("REGULAR");
    label_reduction_system_order_doc.push_back(
        "transition systems are considered in the order given in the planner "
        "input if atomic and in the order of their creation if composite.");
    label_reduction_system_order.push_back("REVERSE");
    label_reduction_system_order_doc.push_back(
        "inverse of REGULAR");
    label_reduction_system_order.push_back("RANDOM");
    label_reduction_system_order_doc.push_back(
        "random order");
    parser.add_enum_option<LabelReductionSystemOrder>(
        "system_order",
        label_reduction_system_order,
        "Order of transition systems for the label reduction "
        "methods that iterate over the set of all transition "
        "systems. Only useful for the choices "
        "all_transition_systems and "
        "all_transition_systems_with_fixpoint for the option "
        "label_reduction_method.",
        "RANDOM",
        label_reduction_system_order_doc);
    // Add random_seed option.
    utils::add_rng_options(parser);
    Options opts = parser.parse();
    if (parser.help_mode()) {
        return nullptr;
    } else if (parser.dry_run()) {
        // In dry-run mode, only validate the option combination.
        bool lr_before_shrinking = opts.get<bool>("before_shrinking");
        bool lr_before_merging = opts.get<bool>("before_merging");
        if (!lr_before_shrinking && !lr_before_merging) {
            cerr << "Please turn on at least one of the options "
                 << "before_shrinking or before_merging!" << endl;
            utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
        }
        return nullptr;
    } else {
        return make_shared<LabelReduction>(opts);
    }
}
// Register the LabelReduction plugin type and the "exact" plugin for it.
static PluginTypePlugin<LabelReduction> _type_plugin(
    "LabelReduction",
    "This page describes the current single 'option' for label reduction.");
static Plugin<LabelReduction> _plugin("exact", _parse);
}
| 15,885 |
C++
| 38.715 | 107 | 0.624048 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/factored_transition_system.h
|
#ifndef MERGE_AND_SHRINK_FACTORED_TRANSITION_SYSTEM_H
#define MERGE_AND_SHRINK_FACTORED_TRANSITION_SYSTEM_H
#include "types.h"
#include <memory>
#include <vector>
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class Distances;
class FactoredTransitionSystem;
class MergeAndShrinkRepresentation;
class Labels;
class TransitionSystem;
class FTSConstIterator {
    /*
      This class allows users to easily iterate over the active indices of
      a factored transition system.
    */
    const FactoredTransitionSystem &fts;
    // current_index is the actual iterator
    int current_index;
    // Advance current_index to the next active index.
    void next_valid_index();
public:
    FTSConstIterator(const FactoredTransitionSystem &fts, bool end);
    void operator++();
    int operator*() const {
        return current_index;
    }
    // Iterators compare equal iff they point at the same index.
    bool operator==(const FTSConstIterator &rhs) const {
        return current_index == rhs.current_index;
    }
    bool operator!=(const FTSConstIterator &rhs) const {
        return current_index != rhs.current_index;
    }
};
/*
  NOTE: A "factor" of this factored transition system is identified by its
index as used in the vectors in this class. Since transformations like
merging also add and remove factors, not all indices are necessarily
associated with factors. This is what the class uses the notion of "active"
factors for: an index is active iff there exists a transition system, a
  merge-and-shrink representation and a distances object in the corresponding
vectors.
TODO: The user of this class has to care more about the notion of active
factors as we would like it to be. We should change this and clean up the
interface that this class shows to the outside world.
*/
class FactoredTransitionSystem {
    std::unique_ptr<Labels> labels;
    // Entries with nullptr have been merged.
    std::vector<std::unique_ptr<TransitionSystem>> transition_systems;
    std::vector<std::unique_ptr<MergeAndShrinkRepresentation>> mas_representations;
    std::vector<std::unique_ptr<Distances>> distances;
    const bool compute_init_distances;
    const bool compute_goal_distances;
    // Number of factors that have not been merged away or extracted.
    int num_active_entries;
    /*
      Assert that the factor at the given index is in a consistent state, i.e.
      that there is a transition system, a distances object, and an MSR.
    */
    void assert_index_valid(int index) const;
    /*
      We maintain the invariant that for all factors, distances are always
      computed and all transitions are grouped according to locally equivalent
      labels.
    */
    bool is_component_valid(int index) const;
    void assert_all_components_valid() const;
public:
    FactoredTransitionSystem(
        std::unique_ptr<Labels> labels,
        std::vector<std::unique_ptr<TransitionSystem>> &&transition_systems,
        std::vector<std::unique_ptr<MergeAndShrinkRepresentation>> &&mas_representations,
        std::vector<std::unique_ptr<Distances>> &&distances,
        bool compute_init_distances,
        bool compute_goal_distances,
        utils::Verbosity verbosity);
    FactoredTransitionSystem(FactoredTransitionSystem &&other);
    ~FactoredTransitionSystem();
    // No copying or assignment.
    FactoredTransitionSystem(const FactoredTransitionSystem &) = delete;
    FactoredTransitionSystem &operator=(
        const FactoredTransitionSystem &) = delete;
    // Merge-and-shrink transformations.
    /*
      Apply the given label mapping to the factored transition system by
      updating all transitions of all transition systems. Only for the factor
      at combinable_index, the local equivalence relation over labels must be
      recomputed; for all factors, all labels that are combined by the label
      mapping have been locally equivalent already before.
    */
    void apply_label_mapping(
        const std::vector<std::pair<int, std::vector<int>>> &label_mapping,
        int combinable_index);
    /*
      Apply the given state equivalence relation to the transition system at
      index if it would reduce its size. If the transition system was shrunk,
      update the other components of the factor (distances, MSR) and return
      true, otherwise return false.

      Note that this method is also suitable to be used for a prune
      transformation. All states not mentioned in the state equivalence
      relation are pruned.
    */
    bool apply_abstraction(
        int index,
        const StateEquivalenceRelation &state_equivalence_relation,
        utils::Verbosity verbosity);
    /*
      Merge the two factors at index1 and index2.
    */
    int merge(
        int index1,
        int index2,
        utils::Verbosity verbosity);
    /*
      Extract the factor at the given index, rendering the FTS invalid.
    */
    std::pair<std::unique_ptr<MergeAndShrinkRepresentation>,
              std::unique_ptr<Distances>> extract_factor(int index);
    void statistics(int index) const;
    void dump(int index) const;
    void dump() const;
    const TransitionSystem &get_transition_system(int index) const {
        return *transition_systems[index];
    }
    const Distances &get_distances(int index) const {
        return *distances[index];
    }
    /*
      A factor is solvable iff the distance of the initial state to some goal
      state is not infinity. Technically, the distance is infinity either if
      the information of Distances is infinity or if the initial state is
      pruned.
    */
    bool is_factor_solvable(int index) const;
    /*
      A factor is trivial iff every concrete state is mapped to an abstract
      goal state, which is equivalent to saying that the corresponding
      merge-and-shrink representation is a total function and all abstract
      states are goal states.

      If h is the heuristic for the factor F, then we have:
        F trivial => h(s) = 0 for all states s.

      Note that a factor being trivial is sufficient but not necessary for
      its heuristic to be useless. Scenarios of useless heuristics that are
      not captured include:
      - All non-goal states are connected to goal states on 0-cost paths.
      - The only pruned states are unreachable (in this case, we get
        h(s) = 0 for all reachable states, which is useless in most
        contexts).
    */
    bool is_factor_trivial(int index) const;
    int get_num_active_entries() const {
        return num_active_entries;
    }
    // Used by LabelReduction and MergeScoringFunctionDFP
    const Labels &get_labels() const {
        return *labels;
    }
    // The following methods are used for iterating over the FTS
    FTSConstIterator begin() const {
        return FTSConstIterator(*this, false);
    }
    FTSConstIterator end() const {
        return FTSConstIterator(*this, true);
    }
    // Total number of slots, including merged (nullptr) entries.
    int get_size() const {
        return transition_systems.size();
    }
    bool is_active(int index) const;
};
}
#endif
| 6,925 |
C
| 32.298077 | 89 | 0.695451 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_representation.cc
|
#include "merge_and_shrink_representation.h"
#include "distances.h"
#include "types.h"
#include "../task_proxy.h"
#include "../utils/logging.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <numeric>
using namespace std;
namespace merge_and_shrink {
// Abstract base class: stores only the size of the abstract domain.
MergeAndShrinkRepresentation::MergeAndShrinkRepresentation(int domain_size)
    : domain_size(domain_size) {
}
MergeAndShrinkRepresentation::~MergeAndShrinkRepresentation() {
}
// Number of abstract states this representation can map to.
int MergeAndShrinkRepresentation::get_domain_size() const {
    return domain_size;
}
// Leaf representation for a single variable: a one-dimensional lookup table
// indexed by the variable's value.
MergeAndShrinkRepresentationLeaf::MergeAndShrinkRepresentationLeaf(
    int var_id, int domain_size)
    : MergeAndShrinkRepresentation(domain_size),
      var_id(var_id),
      lookup_table(domain_size) {
    // Start with the identity mapping: value v maps to abstract state v.
    iota(lookup_table.begin(), lookup_table.end(), 0);
}
void MergeAndShrinkRepresentationLeaf::set_distances(
const Distances &distances) {
assert(distances.are_goal_distances_computed());
for (int &entry : lookup_table) {
if (entry != PRUNED_STATE) {
entry = distances.get_goal_distance(entry);
}
}
}
void MergeAndShrinkRepresentationLeaf::apply_abstraction_to_lookup_table(
const vector<int> &abstraction_mapping) {
int new_domain_size = 0;
for (int &entry : lookup_table) {
if (entry != PRUNED_STATE) {
entry = abstraction_mapping[entry];
new_domain_size = max(new_domain_size, entry + 1);
}
}
domain_size = new_domain_size;
}
// Map the state's value of this variable through the lookup table.
int MergeAndShrinkRepresentationLeaf::get_value(const State &state) const {
    return lookup_table[state[var_id].get_value()];
}
bool MergeAndShrinkRepresentationLeaf::is_total() const {
for (int entry : lookup_table) {
if (entry == PRUNED_STATE) {
return false;
}
}
return true;
}
// Log the one-dimensional lookup table as a comma-separated list.
void MergeAndShrinkRepresentationLeaf::dump() const {
    utils::g_log << "lookup table (leaf): ";
    for (size_t i = 0; i < lookup_table.size(); ++i) {
        utils::g_log << lookup_table[i] << ", ";
    }
    utils::g_log << endl;
}
/*
  Inner-node representation: composes two child representations via a
  two-dimensional lookup table indexed by (left value, right value).

  Note the member-initializer order dependency: the base class is
  initialized from the parameters *before* they are moved into the
  members, and lookup_table is sized from the members left_child and
  right_child, which are initialized before it (declaration order).
*/
MergeAndShrinkRepresentationMerge::MergeAndShrinkRepresentationMerge(
    unique_ptr<MergeAndShrinkRepresentation> left_child_,
    unique_ptr<MergeAndShrinkRepresentation> right_child_)
    : MergeAndShrinkRepresentation(left_child_->get_domain_size() *
                                   right_child_->get_domain_size()),
      left_child(move(left_child_)),
      right_child(move(right_child_)),
      lookup_table(left_child->get_domain_size(),
                   vector<int>(right_child->get_domain_size())) {
    // Fill the table row-major with consecutive abstract state numbers
    // (the identity numbering of the product).
    int counter = 0;
    for (vector<int> &row : lookup_table) {
        for (int &entry : row) {
            entry = counter;
            ++counter;
        }
    }
}
void MergeAndShrinkRepresentationMerge::set_distances(
const Distances &distances) {
assert(distances.are_goal_distances_computed());
for (vector<int> &row : lookup_table) {
for (int &entry : row) {
if (entry != PRUNED_STATE) {
entry = distances.get_goal_distance(entry);
}
}
}
}
void MergeAndShrinkRepresentationMerge::apply_abstraction_to_lookup_table(
const vector<int> &abstraction_mapping) {
int new_domain_size = 0;
for (vector<int> &row : lookup_table) {
for (int &entry : row) {
if (entry != PRUNED_STATE) {
entry = abstraction_mapping[entry];
new_domain_size = max(new_domain_size, entry + 1);
}
}
}
domain_size = new_domain_size;
}
// Evaluate both children; if either maps the state to a pruned abstract
// state, the composite state is pruned as well.
int MergeAndShrinkRepresentationMerge::get_value(
    const State &state) const {
    const int left_value = left_child->get_value(state);
    const int right_value = right_child->get_value(state);
    if (left_value == PRUNED_STATE || right_value == PRUNED_STATE)
        return PRUNED_STATE;
    return lookup_table[left_value][right_value];
}
bool MergeAndShrinkRepresentationMerge::is_total() const {
for (const vector<int> &row : lookup_table) {
for (int entry : row) {
if (entry == PRUNED_STATE) {
return false;
}
}
}
return left_child->is_total() && right_child->is_total();
}
// Log the two-dimensional lookup table (one line per row), then recursively
// dump both children.
void MergeAndShrinkRepresentationMerge::dump() const {
    utils::g_log << "lookup table (merge): " << endl;
    for (const auto &row : lookup_table) {
        for (const auto &value : row) {
            utils::g_log << value << ", ";
        }
        utils::g_log << endl;
    }
    utils::g_log << "left child:" << endl;
    left_child->dump();
    utils::g_log << "right child:" << endl;
    right_child->dump();
}
}
| 4,586 |
C++
| 27.490683 | 75 | 0.618186 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_strategy.cc
|
#include "shrink_strategy.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "../plugin.h"
#include "../utils/logging.h"
#include <iostream>
using namespace std;
namespace merge_and_shrink {
// Log this strategy's type and its subclass-specific options.
void ShrinkStrategy::dump_options() const {
    utils::g_log << "Shrink strategy options: " << endl;
    utils::g_log << "Type: " << name() << endl;
    dump_strategy_specific_options();
}
// Returns the strategy's name as reported by name().
string ShrinkStrategy::get_name() const {
    return name();
}
// Registers the ShrinkStrategy plugin type so concrete strategies can be
// selected on the command line.
static PluginTypePlugin<ShrinkStrategy> _type_plugin(
    "ShrinkStrategy",
    "This page describes the various shrink strategies supported "
    "by the planner.");
}
| 652 |
C++
| 20.766666 | 66 | 0.68865 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_stateless.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_STATELESS_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_STATELESS_H
#include "merge_strategy.h"
#include <memory>
namespace merge_and_shrink {
class MergeSelector;
/*
  Merge strategy that keeps no state of its own: each call to get_next()
  delegates the choice of the next merge to the given merge selector.
*/
class MergeStrategyStateless : public MergeStrategy {
    const std::shared_ptr<MergeSelector> merge_selector;
public:
    MergeStrategyStateless(
        const FactoredTransitionSystem &fts,
        const std::shared_ptr<MergeSelector> &merge_selector);
    virtual ~MergeStrategyStateless() override = default;
    // Return the pair of factor indices to merge next.
    virtual std::pair<int, int> get_next() override;
};
}
#endif
| 582 |
C
| 25.499999 | 62 | 0.750859 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_heuristic.cc
|
#include "merge_and_shrink_heuristic.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "merge_and_shrink_algorithm.h"
#include "merge_and_shrink_representation.h"
#include "transition_system.h"
#include "types.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../task_utils/task_properties.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/system.h"
#include <cassert>
#include <iostream>
#include <utility>
using namespace std;
using utils::ExitCode;
namespace merge_and_shrink {
// Run the merge-and-shrink algorithm on the task and keep the resulting
// factors' representations as the heuristic.
MergeAndShrinkHeuristic::MergeAndShrinkHeuristic(const options::Options &opts)
    : Heuristic(opts),
      verbosity(opts.get<utils::Verbosity>("verbosity")) {
    utils::g_log << "Initializing merge-and-shrink heuristic..." << endl;
    MergeAndShrinkAlgorithm algorithm(opts);
    FactoredTransitionSystem fts = algorithm.build_factored_transition_system(task_proxy);
    // Take ownership of the factors we want to keep; fts is discarded after this.
    extract_factors(fts);
    utils::g_log << "Done initializing merge-and-shrink heuristic." << endl << endl;
}
void MergeAndShrinkHeuristic::extract_factor(
    FactoredTransitionSystem &fts, int index) {
    /*
      Take ownership of the factor at the given index. If its goal
      distances have not been computed yet, compute them, then bake them
      into the merge-and-shrink representation, which serves as the
      heuristic from then on.
    */
    auto factor = fts.extract_factor(index);
    unique_ptr<MergeAndShrinkRepresentation> representation = move(factor.first);
    unique_ptr<Distances> factor_distances = move(factor.second);
    if (!factor_distances->are_goal_distances_computed()) {
        const bool compute_init = false;
        const bool compute_goal = true;
        factor_distances->compute_distances(compute_init, compute_goal, verbosity);
    }
    assert(factor_distances->are_goal_distances_computed());
    representation->set_distances(*factor_distances);
    mas_representations.push_back(move(representation));
}
bool MergeAndShrinkHeuristic::extract_unsolvable_factor(FactoredTransitionSystem &fts) {
    /*
      Look for a factor whose abstract initial state cannot reach a goal.
      If one exists, keep exactly that factor as the heuristic and return
      true; otherwise return false.
    */
    for (int index : fts) {
        if (fts.is_factor_solvable(index))
            continue;
        mas_representations.reserve(1);
        extract_factor(fts, index);
        if (verbosity >= utils::Verbosity::NORMAL) {
            utils::g_log << fts.get_transition_system(index).tag()
                         << "use this unsolvable factor as heuristic."
                         << endl;
        }
        return true;
    }
    return false;
}
void MergeAndShrinkHeuristic::extract_nontrivial_factors(FactoredTransitionSystem &fts) {
    // Keep every factor that can contribute heuristic information;
    // trivial factors are only logged (in verbose mode) and dropped.
    for (int index : fts) {
        if (!fts.is_factor_trivial(index)) {
            extract_factor(fts, index);
        } else if (verbosity >= utils::Verbosity::VERBOSE) {
            utils::g_log << fts.get_transition_system(index).tag()
                         << "is trivial." << endl;
        }
    }
}
// Decide which factors of the final FTS to keep as the heuristic:
// a single unsolvable factor if one exists, otherwise all nontrivial ones.
void MergeAndShrinkHeuristic::extract_factors(FactoredTransitionSystem &fts) {
    /*
      TODO: This method has quite a bit of fiddling with aspects of
      transition systems and the merge-and-shrink representation (checking
      whether distances have been computed; computing them) that we would
      like to have at a lower level. See also the TODO in
      factored_transition_system.h on improving the interface of that class
      (and also related classes like TransitionSystem etc).
    */
    assert(mas_representations.empty());
    int num_active_factors = fts.get_num_active_entries();
    if (verbosity >= utils::Verbosity::NORMAL) {
        utils::g_log << "Number of remaining factors: " << num_active_factors << endl;
    }
    // Fixed misspelled local variable name ("unsolvalbe").
    bool unsolvable = extract_unsolvable_factor(fts);
    if (!unsolvable) {
        extract_nontrivial_factors(fts);
    }
    int num_factors_kept = mas_representations.size();
    if (verbosity >= utils::Verbosity::NORMAL) {
        utils::g_log << "Number of factors kept: " << num_factors_kept << endl;
    }
}
int MergeAndShrinkHeuristic::compute_heuristic(const State &ancestor_state) {
State state = convert_ancestor_state(ancestor_state);
int heuristic = 0;
for (const unique_ptr<MergeAndShrinkRepresentation> &mas_representation : mas_representations) {
int cost = mas_representation->get_value(state);
if (cost == PRUNED_STATE || cost == INF) {
// If state is unreachable or irrelevant, we encountered a dead end.
return DEAD_END;
}
heuristic = max(heuristic, cost);
}
return heuristic;
}
// Command-line plugin entry point: documents the heuristic (synopsis,
// references, language support, properties, usage notes) and constructs a
// MergeAndShrinkHeuristic from the parsed options.
static shared_ptr<Heuristic> _parse(options::OptionParser &parser) {
    parser.document_synopsis(
        "Merge-and-shrink heuristic",
        "This heuristic implements the algorithm described in the following "
        "paper:" + utils::format_conference_reference(
            {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
            "Generalized Label Reduction for Merge-and-Shrink Heuristics",
            "https://ai.dmi.unibas.ch/papers/sievers-et-al-aaai2014.pdf",
            "Proceedings of the 28th AAAI Conference on Artificial"
            " Intelligence (AAAI 2014)",
            "2358-2366",
            "AAAI Press",
            "2014") + "\n" +
        "For a more exhaustive description of merge-and-shrink, see the journal "
        "paper" + utils::format_journal_reference(
            {"Malte Helmert", "Patrik Haslum", "Joerg Hoffmann", "Raz Nissim"},
            "Merge-and-Shrink Abstraction: A Method for Generating Lower Bounds"
            " in Factored State Spaces",
            "https://ai.dmi.unibas.ch/papers/helmert-et-al-jacm2014.pdf",
            "Journal of the ACM",
            "61 (3)",
            "16:1-63",
            "2014") + "\n" +
        "Please note that the journal paper describes the \"old\" theory of "
        "label reduction, which has been superseded by the above conference "
        "paper and is no longer implemented in Fast Downward.\n\n"
        "The following paper describes how to improve the DFP merge strategy "
        "with tie-breaking, and presents two new merge strategies (dyn-MIASM "
        "and SCC-DFP):" + utils::format_conference_reference(
            {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
            "An Analysis of Merge Strategies for Merge-and-Shrink Heuristics",
            "https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf",
            "Proceedings of the 26th International Conference on Automated "
            "Planning and Scheduling (ICAPS 2016)",
            "294-298",
            "AAAI Press",
            "2016") + "\n" +
        "Details of the algorithms and the implementation are described in the "
        "paper" + utils::format_conference_reference(
            {"Silvan Sievers"},
            "Merge-and-Shrink Heuristics for Classical Planning: Efficient "
            "Implementation and Partial Abstractions",
            "https://ai.dmi.unibas.ch/papers/sievers-socs2018.pdf",
            "Proceedings of the 11th Annual Symposium on Combinatorial Search "
            "(SoCS 2018)",
            "90-98",
            "AAAI Press",
            "2018")
        );
    parser.document_language_support("action costs", "supported");
    parser.document_language_support("conditional effects", "supported (but see note)");
    parser.document_language_support("axioms", "not supported");
    parser.document_property("admissible", "yes (but see note)");
    parser.document_property("consistent", "yes (but see note)");
    parser.document_property("safe", "yes");
    parser.document_property("preferred operators", "no");
    parser.document_note(
        "Note",
        "Conditional effects are supported directly. Note, however, that "
        "for tasks that are not factored (in the sense of the JACM 2014 "
        "merge-and-shrink paper), the atomic transition systems on which "
        "merge-and-shrink heuristics are based are nondeterministic, "
        "which can lead to poor heuristics even when only perfect shrinking "
        "is performed.");
    parser.document_note(
        "Note",
        "When pruning unreachable states, admissibility and consistency is "
        "only guaranteed for reachable states and transitions between "
        "reachable states. While this does not impact regular A* search which "
        "will never encounter any unreachable state, it impacts techniques "
        "like symmetry-based pruning: a reachable state which is mapped to an "
        "unreachable symmetric state (which hence is pruned) would falsely be "
        "considered a dead-end and also be pruned, thus violating optimality "
        "of the search.");
    parser.document_note(
        "Note",
        "When using a time limit on the main loop of the merge-and-shrink "
        "algorithm, the heuristic will compute the maximum over all heuristics "
        "induced by the remaining factors if terminating the merge-and-shrink "
        "algorithm early. Exception: if there is an unsolvable factor, it will "
        "be used as the exclusive heuristic since the problem is unsolvable.");
    parser.document_note(
        "Note",
        "A currently recommended good configuration uses bisimulation "
        "based shrinking, the merge strategy SCC-DFP, and the appropriate "
        "label reduction setting (max_states has been altered to be between "
        "10k and 200k in the literature):\n"
        "{{{\nmerge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
        "merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector="
        "score_based_filtering(scoring_functions=[goal_relevance,dfp,"
        "total_order])),label_reduction=exact(before_shrinking=true,"
        "before_merging=false),max_states=50k,threshold_before_merge=1)\n}}}\n");
    Heuristic::add_options_to_parser(parser);
    add_merge_and_shrink_algorithm_options_to_parser(parser);
    options::Options opts = parser.parse();
    if (parser.help_mode()) {
        return nullptr;
    }
    handle_shrink_limit_options_defaults(opts);
    if (parser.dry_run()) {
        // Option validation only; do not build the heuristic.
        return nullptr;
    } else {
        return make_shared<MergeAndShrinkHeuristic>(opts);
    }
}
// Register the heuristic under the name "merge_and_shrink".
static options::Plugin<Evaluator> _plugin("merge_and_shrink", _parse);
| 10,510 |
C++
| 42.614108 | 100 | 0.65176 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree_factory.h
|
#ifndef MERGE_AND_SHRINK_MERGE_TREE_FACTORY_H
#define MERGE_AND_SHRINK_MERGE_TREE_FACTORY_H
#include <string>
#include <memory>
#include <string>
#include <vector>
class TaskProxy;
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
class MergeTree;
enum class UpdateOption;
/*
  Abstract factory for merge trees. Subclasses implement the actual tree
  construction; this base class holds the random number generator and the
  tree update option shared by all factories.
*/
class MergeTreeFactory {
protected:
    std::shared_ptr<utils::RandomNumberGenerator> rng;
    UpdateOption update_option;
    virtual std::string name() const = 0;
    virtual void dump_tree_specific_options() const {}
public:
    explicit MergeTreeFactory(const options::Options &options);
    virtual ~MergeTreeFactory() = default;
    void dump_options() const;
    // Compute a merge tree for the given entire task.
    virtual std::unique_ptr<MergeTree> compute_merge_tree(
        const TaskProxy &task_proxy) = 0;
    /* Compute a merge tree for the given current factored transition
       system, possibly for a subset of indices. */
    virtual std::unique_ptr<MergeTree> compute_merge_tree(
        const TaskProxy &task_proxy,
        const FactoredTransitionSystem &fts,
        const std::vector<int> &indices_subset);
    virtual bool requires_init_distances() const = 0;
    virtual bool requires_goal_distances() const = 0;
    // Derived classes must call this method in their parsing methods.
    static void add_options_to_parser(options::OptionParser &parser);
};
#endif
| 1,493 |
C
| 27.730769 | 70 | 0.727395 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_selector_score_based_filtering.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SELECTOR_SCORE_BASED_FILTERING_H
#define MERGE_AND_SHRINK_MERGE_SELECTOR_SCORE_BASED_FILTERING_H
#include "merge_selector.h"
#include "merge_scoring_function.h"
#include <memory>
#include <vector>
namespace options {
class Options;
}
namespace merge_and_shrink {
/*
  Merge selector that filters merge candidates through a sequence of
  scoring functions until a single candidate remains (or the functions
  are exhausted).
*/
class MergeSelectorScoreBasedFiltering : public MergeSelector {
    std::vector<std::shared_ptr<MergeScoringFunction>> merge_scoring_functions;
    // Keep only the candidates that achieved the best (minimal) score.
    std::vector<std::pair<int, int>> get_remaining_candidates(
        const std::vector<std::pair<int, int>> &merge_candidates,
        const std::vector<double> &scores) const;
protected:
    virtual std::string name() const override;
    virtual void dump_specific_options() const override;
public:
    explicit MergeSelectorScoreBasedFiltering(const options::Options &options);
    virtual ~MergeSelectorScoreBasedFiltering() override = default;
    virtual std::pair<int, int> select_merge(
        const FactoredTransitionSystem &fts,
        const std::vector<int> &indices_subset = std::vector<int>()) const override;
    virtual void initialize(const TaskProxy &task_proxy) override;
    virtual bool requires_init_distances() const override;
    virtual bool requires_goal_distances() const override;
};
#endif
| 1,268 |
C
| 32.394736 | 84 | 0.743691 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/transition_system.cc
|
#include "transition_system.h"
#include "distances.h"
#include "label_equivalence_relation.h"
#include "labels.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include "../utils/system.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <iterator>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
using namespace std;
using utils::ExitCode;
namespace merge_and_shrink {
// Print a transition as "src->target".
ostream &operator<<(ostream &os, const Transition &trans) {
    os << trans.src << "->" << trans.target;
    return os;
}
// Sorts the given set of transitions and removes duplicates.
// Sorts the given set of transitions and removes duplicates.
static void normalize_given_transitions(vector<Transition> &transitions) {
    sort(transitions.begin(), transitions.end());
    transitions.erase(unique(transitions.begin(), transitions.end()), transitions.end());
}
// Iterator over the non-empty label groups of a transition system,
// yielding each group together with its transitions.
TSConstIterator::TSConstIterator(
    const LabelEquivalenceRelation &label_equivalence_relation,
    const vector<vector<Transition>> &transitions_by_group_id,
    bool end)
    : label_equivalence_relation(label_equivalence_relation),
      transitions_by_group_id(transitions_by_group_id),
      current_group_id((end ? label_equivalence_relation.get_size() : 0)) {
    // Skip over any leading empty groups (no-op for the end iterator).
    next_valid_index();
}
// Advance current_group_id past empty label groups.
void TSConstIterator::next_valid_index() {
    while (current_group_id < label_equivalence_relation.get_size()
           && label_equivalence_relation.is_empty_group(current_group_id)) {
        ++current_group_id;
    }
}
void TSConstIterator::operator++() {
    ++current_group_id;
    next_valid_index();
}
// Return the current label group paired with its transitions.
GroupAndTransitions TSConstIterator::operator*() const {
    return GroupAndTransitions(
        label_equivalence_relation.get_group(current_group_id),
        transitions_by_group_id[current_group_id]);
}
/*
Implementation note: Transitions are grouped by their label groups,
not by source state or any such thing. Such a grouping is beneficial
for fast generation of products because we can iterate label group
by label group, and it also allows applying transition system
mappings very efficiently.
We rarely need to be able to efficiently query the successors of a
given state; actually, only the distance computation requires that,
and it simply generates such a graph representation of the
transitions itself. Various experiments have shown that maintaining
a graph representation permanently for the benefit of distance
computation is not worth the overhead.
*/
// Construct a transition system by taking ownership of all components.
// The asserted invariants must hold for the passed-in data.
TransitionSystem::TransitionSystem(
    int num_variables,
    vector<int> &&incorporated_variables,
    unique_ptr<LabelEquivalenceRelation> &&label_equivalence_relation,
    vector<vector<Transition>> &&transitions_by_group_id,
    int num_states,
    vector<bool> &&goal_states,
    int init_state)
    : num_variables(num_variables),
      incorporated_variables(move(incorporated_variables)),
      label_equivalence_relation(move(label_equivalence_relation)),
      transitions_by_group_id(move(transitions_by_group_id)),
      num_states(num_states),
      goal_states(move(goal_states)),
      init_state(init_state) {
    assert(are_transitions_sorted_unique());
    assert(in_sync_with_label_equivalence_relation());
}
// Deep copy: the label equivalence relation is cloned explicitly because it
// is held through a unique_ptr.
TransitionSystem::TransitionSystem(const TransitionSystem &other)
    : num_variables(other.num_variables),
      incorporated_variables(other.incorporated_variables),
      label_equivalence_relation(
          utils::make_unique_ptr<LabelEquivalenceRelation>(
              *other.label_equivalence_relation)),
      transitions_by_group_id(other.transitions_by_group_id),
      num_states(other.num_states),
      goal_states(other.goal_states),
      init_state(other.init_state) {
}
TransitionSystem::~TransitionSystem() {
}
/*
  Build the synchronized product of ts1 and ts2: the state space is the
  cross product of the components' state spaces (state numbering is
  s1 * |ts2| + s2), and the transitions of each label are the products of
  its transitions in the two components.
*/
unique_ptr<TransitionSystem> TransitionSystem::merge(
    const Labels &labels,
    const TransitionSystem &ts1,
    const TransitionSystem &ts2,
    utils::Verbosity verbosity) {
    if (verbosity >= utils::Verbosity::VERBOSE) {
        utils::g_log << "Merging " << ts1.get_description() << " and "
                     << ts2.get_description() << endl;
    }
    assert(ts1.init_state != PRUNED_STATE && ts2.init_state != PRUNED_STATE);
    assert(ts1.are_transitions_sorted_unique() && ts2.are_transitions_sorted_unique());
    int num_variables = ts1.num_variables;
    vector<int> incorporated_variables;
    ::set_union(
        ts1.incorporated_variables.begin(), ts1.incorporated_variables.end(),
        ts2.incorporated_variables.begin(), ts2.incorporated_variables.end(),
        back_inserter(incorporated_variables));
    vector<vector<int>> label_groups;
    vector<vector<Transition>> transitions_by_group_id;
    transitions_by_group_id.reserve(labels.get_max_size());
    int ts1_size = ts1.get_size();
    int ts2_size = ts2.get_size();
    int num_states = ts1_size * ts2_size;
    vector<bool> goal_states(num_states, false);
    int init_state = -1;
    // A product state is a goal iff both component states are goals; the
    // product initial state pairs the two component initial states.
    for (int s1 = 0; s1 < ts1_size; ++s1) {
        for (int s2 = 0; s2 < ts2_size; ++s2) {
            int state = s1 * ts2_size + s2;
            if (ts1.goal_states[s1] && ts2.goal_states[s2])
                goal_states[state] = true;
            if (s1 == ts1.init_state && s2 == ts2.init_state)
                init_state = state;
        }
    }
    assert(init_state != -1);
    /*
      We can compute the local equivalence relation of a composite T
      from the local equivalence relations of the two components T1 and T2:
      l and l' are locally equivalent in T iff:
      (A) they are locally equivalent in T1 and in T2, or
      (B) they are both dead in T (e.g., this includes the case where
          l is dead in T1 only and l' is dead in T2 only, so they are not
          locally equivalent in either of the components).
    */
    int multiplier = ts2_size;
    vector<int> dead_labels;
    for (GroupAndTransitions gat : ts1) {
        const LabelGroup &group1 = gat.label_group;
        const vector<Transition> &transitions1 = gat.transitions;
        // Distribute the labels of this group among the "buckets"
        // corresponding to the groups of ts2.
        unordered_map<int, vector<int>> buckets;
        for (int label_no : group1) {
            int group2_id = ts2.label_equivalence_relation->get_group_id(label_no);
            buckets[group2_id].push_back(label_no);
        }
        // Now buckets contains all equivalence classes that are
        // refinements of group1.
        // Now create the new groups together with their transitions.
        for (auto &bucket : buckets) {
            const vector<Transition> &transitions2 =
                ts2.get_transitions_for_group_id(bucket.first);
            // Create the new transitions for this bucket.
            vector<Transition> new_transitions;
            // Guard against overflow of the product's transition count.
            if (!transitions1.empty() && !transitions2.empty()
                && transitions1.size() > new_transitions.max_size() / transitions2.size())
                utils::exit_with(ExitCode::SEARCH_OUT_OF_MEMORY);
            new_transitions.reserve(transitions1.size() * transitions2.size());
            for (const Transition &transition1 : transitions1) {
                int src1 = transition1.src;
                int target1 = transition1.target;
                for (const Transition &transition2 : transitions2) {
                    int src2 = transition2.src;
                    int target2 = transition2.target;
                    int src = src1 * multiplier + src2;
                    int target = target1 * multiplier + target2;
                    new_transitions.push_back(Transition(src, target));
                }
            }
            // Create a new group if the transitions are not empty.
            vector<int> &new_labels = bucket.second;
            if (new_transitions.empty()) {
                dead_labels.insert(dead_labels.end(), new_labels.begin(), new_labels.end());
            } else {
                sort(new_transitions.begin(), new_transitions.end());
                label_groups.push_back(move(new_labels));
                transitions_by_group_id.push_back(move(new_transitions));
            }
        }
    }
    /*
      We collect all dead labels separately, because the bucket refining
      does not work in cases where there are at least two dead labels l1
      and l2 in the composite, where l1 was only a dead label in the first
      component and l2 was only a dead label in the second component.
      All dead labels should form one single label group.
    */
    if (!dead_labels.empty()) {
        label_groups.push_back(move(dead_labels));
        // Dead labels have empty transitions
        transitions_by_group_id.emplace_back();
    }
    assert(transitions_by_group_id.size() == label_groups.size());
    unique_ptr<LabelEquivalenceRelation> label_equivalence_relation =
        utils::make_unique_ptr<LabelEquivalenceRelation>(labels, label_groups);
    return utils::make_unique_ptr<TransitionSystem>(
        num_variables,
        move(incorporated_variables),
        move(label_equivalence_relation),
        move(transitions_by_group_id),
        num_states,
        move(goal_states),
        init_state
        );
}
void TransitionSystem::compute_locally_equivalent_labels() {
    /*
      Compare every group of labels and their transitions to all others and
      merge two groups whenever the transitions are the same.
    */
    for (int group_id1 = 0; group_id1 < label_equivalence_relation->get_size();
         ++group_id1) {
        if (!label_equivalence_relation->is_empty_group(group_id1)) {
            const vector<Transition> &transitions1 = transitions_by_group_id[group_id1];
            for (int group_id2 = group_id1 + 1;
                 group_id2 < label_equivalence_relation->get_size(); ++group_id2) {
                if (!label_equivalence_relation->is_empty_group(group_id2)) {
                    vector<Transition> &transitions2 = transitions_by_group_id[group_id2];
                    if (transitions1 == transitions2) {
                        // Fold group2 into group1 and free the now-unused
                        // duplicate transition list.
                        label_equivalence_relation->move_group_into_group(
                            group_id2, group_id1);
                        utils::release_vector_memory(transitions2);
                    }
                }
            }
        }
    }
}
/*
  Shrink this transition system according to the given state equivalence
  relation: recompute goal states, remap all transitions (dropping those
  touching pruned states), renormalize them, and recompute locally
  equivalent labels.
*/
void TransitionSystem::apply_abstraction(
    const StateEquivalenceRelation &state_equivalence_relation,
    const vector<int> &abstraction_mapping,
    utils::Verbosity verbosity) {
    assert(are_transitions_sorted_unique());
    assert(in_sync_with_label_equivalence_relation());
    int new_num_states = state_equivalence_relation.size();
    assert(new_num_states < num_states);
    if (verbosity >= utils::Verbosity::VERBOSE) {
        utils::g_log << tag() << "applying abstraction (" << get_size()
                     << " to " << new_num_states << " states)" << endl;
    }
    // A new state is a goal iff its equivalence class contains a goal state.
    vector<bool> new_goal_states(new_num_states, false);
    for (int new_state = 0; new_state < new_num_states; ++new_state) {
        const StateEquivalenceClass &state_equivalence_class =
            state_equivalence_relation[new_state];
        assert(!state_equivalence_class.empty());
        for (int old_state : state_equivalence_class) {
            if (goal_states[old_state]) {
                new_goal_states[new_state] = true;
                break;
            }
        }
    }
    goal_states = move(new_goal_states);
    // Update all transitions.
    for (vector<Transition> &transitions : transitions_by_group_id) {
        if (!transitions.empty()) {
            vector<Transition> new_transitions;
            /*
              We reserve more memory than necessary here, but this is better
              than potentially resizing the vector several times when inserting
              transitions one after the other. See issue604-v6.

              An alternative could be to not use a new vector, but to modify
              the existing transitions inplace, and to remove all empty
              positions in the end. This would be more ugly, though.
            */
            new_transitions.reserve(transitions.size());
            for (size_t i = 0; i < transitions.size(); ++i) {
                const Transition &transition = transitions[i];
                int src = abstraction_mapping[transition.src];
                int target = abstraction_mapping[transition.target];
                // Drop transitions involving pruned states.
                if (src != PRUNED_STATE && target != PRUNED_STATE)
                    new_transitions.push_back(Transition(src, target));
            }
            normalize_given_transitions(new_transitions);
            transitions = move(new_transitions);
        }
    }
    compute_locally_equivalent_labels();
    num_states = new_num_states;
    init_state = abstraction_mapping[init_state];
    if (verbosity >= utils::Verbosity::VERBOSE && init_state == PRUNED_STATE) {
        utils::g_log << tag() << "initial state pruned; task unsolvable" << endl;
    }
    assert(are_transitions_sorted_unique());
    assert(in_sync_with_label_equivalence_relation());
}
/*
  Apply the given label mapping (one entry per new label, mapping it to the
  set of old labels it replaces) to this transition system.

  If only_equivalent_labels is true, the caller guarantees that all reduced
  labels of each mapping entry belong to the same label group, so only the
  label equivalence relation needs updating. Otherwise, transitions of the
  reduced labels must be recombined as well (see the comment below).
*/
void TransitionSystem::apply_label_reduction(
    const vector<pair<int, vector<int>>> &label_mapping,
    bool only_equivalent_labels) {
    assert(are_transitions_sorted_unique());
    assert(in_sync_with_label_equivalence_relation());
    /*
      We iterate over the given label mapping, treating every new label and
      the reduced old labels separately. We further distinguish the case
      where we know that the reduced labels are all from the same equivalence
      class from the case where we may combine arbitrary labels.
      The case where only equivalent labels are combined is simple: remove all
      old labels from the label group and add the new one.
      The other case is more involved: again remove all old labels from their
      groups, and the groups themselves if they become empty. Also collect
      the transitions of all reduced labels. Add a new group for every new
      label and assign the collected transitions to this group. Recompute the
      cost of all groups and compute locally equivalent labels.
      NOTE: Previously, this latter case was computed in a more incremental
      fashion: Rather than recomputing cost of all groups, we only recomputed
      cost for groups from which we actually removed labels (hence temporarily
      storing these affected groups). Furthermore, rather than computing
      locally equivalent labels from scratch, we did not per default add a new
      group for every label, but checked for an existing equivalent label
      group. In issue539, it turned out that this incremental fashion of
      computation does not accelerate the computation.
    */
    if (only_equivalent_labels) {
        // All reduced labels share a group: the transitions stay as they are.
        label_equivalence_relation->apply_label_mapping(label_mapping);
    } else {
        /*
          Go over all mappings, collect transitions of old groups and
          remember all affected group IDs. This needs to happen *before*
          updating label_equivalence_relation, because after updating it,
          we cannot find out the group ID of reduced labels anymore.
        */
        vector<vector<Transition>> new_transitions;
        new_transitions.reserve(label_mapping.size());
        unordered_set<int> affected_group_ids;
        for (const pair<int, vector<int>> &mapping: label_mapping) {
            const vector<int> &old_label_nos = mapping.second;
            assert(old_label_nos.size() >= 2);
            unordered_set<int> seen_group_ids;
            // Using a set deduplicates and sorts the merged transitions.
            set<Transition> new_label_transitions;
            for (int old_label_no : old_label_nos) {
                int group_id = label_equivalence_relation->get_group_id(old_label_no);
                if (seen_group_ids.insert(group_id).second) {
                    affected_group_ids.insert(group_id);
                    const vector<Transition> &transitions = transitions_by_group_id[group_id];
                    new_label_transitions.insert(transitions.begin(), transitions.end());
                }
            }
            new_transitions.emplace_back(
                new_label_transitions.begin(), new_label_transitions.end());
        }
        assert(label_mapping.size() == new_transitions.size());
        /*
          Apply all label mappings to label_equivalence_relation. This needs
          to happen *before* we can add the new transitions to this transition
          systems and *before* we can remove empty groups of old labels,
          because only after updating label_equivalence_relation, we know the
          group ID of the new labels and which old groups became empty.
        */
        label_equivalence_relation->apply_label_mapping(label_mapping, &affected_group_ids);
        /*
          Go over the transitions of new labels and add them at the correct
          position.
          NOTE: it is important that this happens in increasing order of label
          numbers to ensure that transitions_by_group_id are synchronized with
          label groups of label_equivalence_relation.
        */
        for (size_t i = 0; i < label_mapping.size(); ++i) {
            vector<Transition> &transitions = new_transitions[i];
            assert(label_equivalence_relation->get_group_id(label_mapping[i].first)
                   == static_cast<int>(transitions_by_group_id.size()));
            transitions_by_group_id.push_back(move(transitions));
        }
        // Go over all affected group IDs and remove their transitions if the
        // group is empty.
        for (int group_id : affected_group_ids) {
            if (label_equivalence_relation->is_empty_group(group_id)) {
                utils::release_vector_memory(transitions_by_group_id[group_id]);
            }
        }
        compute_locally_equivalent_labels();
    }
    assert(are_transitions_sorted_unique());
    assert(in_sync_with_label_equivalence_relation());
}
// Log prefix for this transition system: its description with the first
// character capitalized, followed by ": ".
string TransitionSystem::tag() const {
    string prefix(get_description());
    prefix.front() = toupper(prefix.front());
    return prefix + ": ";
}
bool TransitionSystem::are_transitions_sorted_unique() const {
for (GroupAndTransitions gat : *this) {
if (!utils::is_sorted_unique(gat.transitions))
return false;
}
return true;
}
// Debug check: there must be exactly one transition bucket per label group.
bool TransitionSystem::in_sync_with_label_equivalence_relation() const {
    int num_groups = label_equivalence_relation->get_size();
    int num_buckets = static_cast<int>(transitions_by_group_id.size());
    return num_groups == num_buckets;
}
// A transition system is unsolvable if its initial state has been pruned,
// or if goal distances are available and show the initial state to be at
// infinite distance from every goal state.
bool TransitionSystem::is_solvable(const Distances &distances) const {
    if (init_state == PRUNED_STATE)
        return false;
    return !distances.are_goal_distances_computed() ||
           distances.get_goal_distance(init_state) != INF;
}
int TransitionSystem::compute_total_transitions() const {
int total = 0;
for (GroupAndTransitions gat : *this) {
total += gat.transitions.size();
}
return total;
}
// Human-readable description: atomic systems are identified by their single
// variable, composite systems by how many of the task's variables they
// incorporate.
string TransitionSystem::get_description() const {
    ostringstream description;
    if (incorporated_variables.size() == 1) {
        description << "atomic transition system #"
                    << *incorporated_variables.begin();
    } else {
        description << "composite transition system with "
                    << incorporated_variables.size() << "/"
                    << num_variables << " vars";
    }
    return description.str();
}
/*
  Dump this transition system as a Graphviz "dot" digraph to the log:
  one node per abstract state (goal states double-circled, the initial
  state marked by an edge from an invisible "start" node), and one edge
  per transition, labeled with the labels of its group.
*/
void TransitionSystem::dump_dot_graph() const {
    assert(are_transitions_sorted_unique());
    utils::g_log << "digraph transition_system";
    for (size_t i = 0; i < incorporated_variables.size(); ++i)
        utils::g_log << "_" << incorporated_variables[i];
    utils::g_log << " {" << endl;
    utils::g_log << "    node [shape = none] start;" << endl;
    for (int i = 0; i < num_states; ++i) {
        bool is_init = (i == init_state);
        bool is_goal = goal_states[i];
        utils::g_log << "    node [shape = " << (is_goal ? "doublecircle" : "circle")
                     << "] node" << i << ";" << endl;
        if (is_init)
            utils::g_log << "    start -> node" << i << ";" << endl;
    }
    // One edge per (label group, transition) pair; the labels of the group
    // are joined with "_" and each prefixed with "x" to form the edge label.
    for (GroupAndTransitions gat : *this) {
        const LabelGroup &label_group = gat.label_group;
        const vector<Transition> &transitions = gat.transitions;
        for (const Transition &transition : transitions) {
            int src = transition.src;
            int target = transition.target;
            utils::g_log << "    node" << src << " -> node" << target << " [label = ";
            for (LabelConstIter label_it = label_group.begin();
                 label_it != label_group.end(); ++label_it) {
                if (label_it != label_group.begin())
                    utils::g_log << "_";
                utils::g_log << "x" << *label_it;
            }
            utils::g_log << "];" << endl;
        }
    }
    utils::g_log << "}" << endl;
}
void TransitionSystem::dump_labels_and_transitions() const {
utils::g_log << tag() << "transitions" << endl;
for (GroupAndTransitions gat : *this) {
const LabelGroup &label_group = gat.label_group;
// utils::g_log << "group ID: " << ts_it.get_id() << endl;
utils::g_log << "labels: ";
for (LabelConstIter label_it = label_group.begin();
label_it != label_group.end(); ++label_it) {
if (label_it != label_group.begin())
utils::g_log << ",";
utils::g_log << *label_it;
}
utils::g_log << endl;
utils::g_log << "transitions: ";
const vector<Transition> &transitions = gat.transitions;
for (size_t i = 0; i < transitions.size(); ++i) {
int src = transitions[i].src;
int target = transitions[i].target;
if (i != 0)
utils::g_log << ",";
utils::g_log << src << " -> " << target;
}
utils::g_log << endl;
utils::g_log << "cost: " << label_group.get_cost() << endl;
}
}
// One-line summary for the log: number of states and number of transitions.
void TransitionSystem::statistics() const {
    int num_arcs = compute_total_transitions();
    utils::g_log << tag() << get_size() << " states, "
                 << num_arcs << " arcs " << endl;
}
}
| 22,046 |
C++
| 39.15847 | 100 | 0.621791 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree.h
|
#ifndef MERGE_AND_SHRINK_MERGE_TREE_H
#define MERGE_AND_SHRINK_MERGE_TREE_H
#include <memory>
#include <utility>
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
extern const int UNINITIALIZED;
/*
Binary tree data structure with convenience methods for removing
sibling leaf nodes and modifying the tree by removing specific
leaf nodes that are not siblings (see also MergeTree class).
*/
struct MergeTreeNode {
    // Raw pointers with owning semantics: the destructor deletes both
    // children. parent is a non-owning back pointer.
    MergeTreeNode *parent;
    MergeTreeNode *left_child;
    MergeTreeNode *right_child;
    // Transition system index for leaves; UNINITIALIZED for internal nodes
    // whose merge has not yet happened.
    int ts_index;
    MergeTreeNode() = delete;
    // Copy constructor. Does not set parent pointers.
    MergeTreeNode(const MergeTreeNode &other);
    // Construct a leaf for the given transition system index.
    MergeTreeNode(int ts_index);
    // Construct an internal node owning the two given subtrees.
    MergeTreeNode(MergeTreeNode *left_child, MergeTreeNode *right_child);
    ~MergeTreeNode();
    // Deepest node whose two children are both leaves, i.e. the next merge.
    MergeTreeNode *get_left_most_sibling();
    // Delete both (leaf) children, turn this node into a leaf with the given
    // index, and return the children's former indices.
    std::pair<int, int> erase_children_and_set_index(int new_index);
    // Find the parent node for the given index.
    MergeTreeNode *get_parent_of_ts_index(int index);
    int compute_num_internal_nodes() const;
    // Print the subtree sideways (right subtree first) to the log.
    void inorder(int offset, int current_indentation) const;
    bool is_leaf() const {
        return !left_child && !right_child;
    }
    bool has_two_leaf_children() const {
        return left_child && right_child &&
               left_child->is_leaf() && right_child->is_leaf();
    }
};
/*
  How MergeTree::update chooses which of the two candidate leaf nodes
  represents the result of an externally performed merge (see the class
  comment of MergeTree below).
*/
enum class UpdateOption {
    USE_FIRST,
    USE_SECOND,
    USE_RANDOM
};
/*
This class manages a binary tree data structure (MergeTreeNode) that
represents a merge tree.
In the common use case, the merge tree is used as "the merge strategy"
and hence it is always synchronized with the current factored transition
system managed by the merge-and-shrink heuristic. In that case, when asked
for a next merge, the *left-most* sibling leaf pair is returned and their
parent node updated to represent the resulting composite transition system.
NOTE: returning the left-most sibling leaf pair does not allow to represent
arbitrary merge strategies with this class, because there is no possibility
to specify the merge order of current sibling leaf nodes in an arbitrary
way. For existing precomputed merge strategies like the linear ones or MIASM,
this does not matter.
For the less common use case of using a merge tree within another merge
strategy where the merge tree acts as a fallback mechanism, the merge tree
has to be kept synchronized with the factored transition system. This
requires informing the merge tree about all merges that happen and that may
differ from what the merge tree prescribes. The method update provides this
functionality, using the user specified choice update_option to choose one
of two possible leaf nodes representing the indices of the given merge as the
future node representing the merge.
*/
class MergeTree {
    // Owned root of the tree; deleted in the destructor.
    MergeTreeNode *root;
    // Used to break ties when update_option is USE_RANDOM.
    std::shared_ptr<utils::RandomNumberGenerator> rng;
    UpdateOption update_option;
    /*
      Find the two parents (can be the same) of the given indices. The first
      one will correspond to a merge that would have been merged earlier in
      the merge tree than the second one.
    */
    std::pair<MergeTreeNode *, MergeTreeNode *> get_parents_of_ts_indices(
        const std::pair<int, int> &ts_indices, int new_index);
    MergeTree() = delete;
public:
    // Takes ownership of root.
    MergeTree(
        MergeTreeNode *root,
        const std::shared_ptr<utils::RandomNumberGenerator> &rng,
        UpdateOption update_option);
    ~MergeTree();
    // Return the next prescribed merge (left-most sibling leaf pair) and
    // collapse that pair into a leaf with the given new index.
    std::pair<int, int> get_next_merge(int new_index);
    /*
      Inform the merge tree about a merge that happened independently of
      using the tree's method get_next_merge.
    */
    void update(std::pair<int, int> merge, int new_index);
    // True when the tree has collapsed into a single leaf.
    bool done() const {
        return root->is_leaf();
    }
    int compute_num_internal_nodes() const {
        return root->compute_num_internal_nodes();
    }
    // NOTE: this performs the "inverted" inorder_traversal, i.e. from right
    // to left, so that the printed tree matches the correct left-to-right
    // order.
    void inorder_traversal(int indentation_offset) const;
};
}
#endif
| 4,180 |
C
| 33.553719 | 79 | 0.714354 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_stateless.cc
|
#include "merge_strategy_factory_stateless.h"
#include "merge_selector.h"
#include "merge_strategy_stateless.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/memory.h"
using namespace std;
namespace merge_and_shrink {
// Extract the merge selector that will drive this stateless strategy from
// the parsed options.
MergeStrategyFactoryStateless::MergeStrategyFactoryStateless(
    options::Options &options)
    : merge_selector(options.get<shared_ptr<MergeSelector>>("merge_selector")) {
}
// Initialize the selector for the given task and wrap it in a
// MergeStrategyStateless bound to the given factored transition system.
unique_ptr<MergeStrategy> MergeStrategyFactoryStateless::compute_merge_strategy(
    const TaskProxy &task_proxy,
    const FactoredTransitionSystem &fts) {
    merge_selector->initialize(task_proxy);
    return utils::make_unique_ptr<MergeStrategyStateless>(fts, merge_selector);
}
// Name of this merge strategy, used for logging.
string MergeStrategyFactoryStateless::name() const {
    return "stateless";
}
// Delegate option dumping to the wrapped merge selector.
void MergeStrategyFactoryStateless::dump_strategy_specific_options() const {
    merge_selector->dump_options();
}
// Init distances are required exactly when the underlying selector needs them.
bool MergeStrategyFactoryStateless::requires_init_distances() const {
    return merge_selector->requires_init_distances();
}
// Goal distances are required exactly when the underlying selector needs them.
bool MergeStrategyFactoryStateless::requires_goal_distances() const {
    return merge_selector->requires_goal_distances();
}
// Option parser plugin entry point for the "merge_stateless" strategy.
static shared_ptr<MergeStrategyFactory> _parse(options::OptionParser &parser) {
    parser.document_synopsis(
        "Stateless merge strategy",
        "This merge strategy has a merge selector, which computes the next "
        "merge only depending on the current state of the factored transition "
        "system, not requiring any additional information.");
    parser.document_note(
        "Note",
        "Examples include the DFP merge strategy, which can be obtained using:\n"
        "{{{\n"
        "merge_strategy=merge_stateless(merge_selector=score_based_filtering("
        "scoring_functions=[goal_relevance,dfp,total_order(<order_option>))]))"
        "\n}}}\n"
        "and the (dynamic/score-based) MIASM strategy, which can be obtained "
        "using:\n"
        "{{{\n"
        "merge_strategy=merge_stateless(merge_selector=score_based_filtering("
        "scoring_functions=[sf_miasm(<shrinking_options>),total_order(<order_option>)]"
        "\n}}}");
    parser.add_option<shared_ptr<MergeSelector>>(
        "merge_selector",
        "The merge selector to be used.");
    options::Options opts = parser.parse();
    // During a dry run we only validate options, so no factory is built.
    if (parser.dry_run())
        return nullptr;
    return make_shared<MergeStrategyFactoryStateless>(opts);
}
static options::Plugin<MergeStrategyFactory> _plugin("merge_stateless", _parse);
}
| 2,562 |
C++
| 33.173333 | 87 | 0.704918 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_fh.cc
|
#include "shrink_fh.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <map>
#include <memory>
#include <vector>
using namespace std;
namespace merge_and_shrink {
// Read whether to shrink high or low f/h states first from the options.
ShrinkFH::ShrinkFH(const Options &opts)
    : ShrinkBucketBased(opts),
      f_start(opts.get<HighLow>("shrink_f")),
      h_start(opts.get<HighLow>("shrink_h")) {
}
/*
  Partition the states of ts into buckets by (f, h) value, ordered by the
  configured f_start/h_start preferences. Depending on the expected bucket
  density, dispatch to a map-based or a vector-based implementation.
*/
vector<ShrinkBucketBased::Bucket> ShrinkFH::partition_into_buckets(
    const TransitionSystem &ts,
    const Distances &distances) const {
    assert(distances.are_init_distances_computed());
    assert(distances.are_goal_distances_computed());
    // First pass: determine the maximum h- and f-values over all states.
    int max_h = 0;
    int max_f = 0;
    for (int state = 0; state < ts.get_size(); ++state) {
        int g = distances.get_init_distance(state);
        int h = distances.get_goal_distance(state);
        int f;
        if (g == INF || h == INF) {
            /*
              If not pruning unreachable or irrelevant states, we may have
              states with g- or h-values of infinity, which we need to treat
              manually here to avoid overflow.
              Also note that not using full pruning means that if there is at
              least one dead state, this strategy will always use the
              map-based approach for partitioning. This is important because
              the vector-based approach requires that there are no dead states.
            */
            f = INF;
        } else {
            f = g + h;
        }
        max_h = max(max_h, h);
        max_f = max(max_f, f);
    }
    // Calculate with double to avoid overflow.
    if (static_cast<double>(max_f) * max_f / 2.0 > ts.get_size()) {
        // Use map because an average bucket in the vector structure
        // would contain less than 1 element (roughly).
        return ordered_buckets_use_map(ts, distances);
    } else {
        return ordered_buckets_use_vector(ts, distances, max_f, max_h);
    }
}
// Helper function for ordered_buckets_use_map.
template<class HIterator, class Bucket>
static void collect_h_buckets(
HIterator begin, HIterator end,
vector<Bucket> &buckets) {
for (HIterator iter = begin; iter != end; ++iter) {
Bucket &bucket = iter->second;
assert(!bucket.empty());
buckets.push_back(Bucket());
buckets.back().swap(bucket);
}
}
// Helper function for ordered_buckets_use_map: iterate over the per-f maps
// and flatten their h-buckets, walking h either descending or ascending
// depending on h_start.
template<class FHIterator, class Bucket>
static void collect_f_h_buckets(
    FHIterator begin, FHIterator end,
    ShrinkFH::HighLow h_start,
    vector<Bucket> &buckets) {
    for (FHIterator iter = begin; iter != end; ++iter) {
        if (h_start == ShrinkFH::HighLow::HIGH) {
            collect_h_buckets(iter->second.rbegin(), iter->second.rend(),
                              buckets);
        } else {
            collect_h_buckets(iter->second.begin(), iter->second.end(),
                              buckets);
        }
    }
}
/*
  Map-based partitioning: group states into a nested ordered map
  f -> h -> bucket, then flatten into a vector of buckets in the order
  given by f_start/h_start. Handles dead states (f == INF) correctly.
*/
vector<ShrinkBucketBased::Bucket> ShrinkFH::ordered_buckets_use_map(
    const TransitionSystem &ts,
    const Distances &distances) const {
    map<int, map<int, Bucket>> states_by_f_and_h;
    int bucket_count = 0;
    int num_states = ts.get_size();
    for (int state = 0; state < num_states; ++state) {
        int g = distances.get_init_distance(state);
        int h = distances.get_goal_distance(state);
        int f;
        // Dead states get f = INF to avoid overflow of g + h.
        if (g == INF || h == INF) {
            f = INF;
        } else {
            f = g + h;
        }
        Bucket &bucket = states_by_f_and_h[f][h];
        if (bucket.empty())
            ++bucket_count;
        bucket.push_back(state);
    }
    vector<Bucket> buckets;
    buckets.reserve(bucket_count);
    // Flatten in descending or ascending f order, as configured.
    if (f_start == HighLow::HIGH) {
        collect_f_h_buckets(
            states_by_f_and_h.rbegin(), states_by_f_and_h.rend(),
            h_start, buckets);
    } else {
        collect_f_h_buckets(
            states_by_f_and_h.begin(), states_by_f_and_h.end(),
            h_start, buckets);
    }
    assert(static_cast<int>(buckets.size()) == bucket_count);
    return buckets;
}
/*
  Vector-based partitioning: a dense 2D structure indexed by [f][h]
  (with h <= min(f, max_h)), flattened in the order given by
  f_start/h_start. Requires that there are no dead states (see the
  asserts below); partition_into_buckets dispatches accordingly.
*/
vector<ShrinkBucketBased::Bucket> ShrinkFH::ordered_buckets_use_vector(
    const TransitionSystem &ts,
    const Distances &distances,
    int max_f,
    int max_h) const {
    vector<vector<Bucket>> states_by_f_and_h;
    states_by_f_and_h.resize(max_f + 1);
    // h cannot exceed f, so row f only needs min(f, max_h) + 1 entries.
    for (int f = 0; f <= max_f; ++f)
        states_by_f_and_h[f].resize(min(f, max_h) + 1);
    int bucket_count = 0;
    int num_states = ts.get_size();
    for (int state = 0; state < num_states; ++state) {
        int g = distances.get_init_distance(state);
        int h = distances.get_goal_distance(state);
        // If the state is dead, we should use ordered_buckets_use_map instead.
        assert(g != INF && h != INF);
        int f = g + h;
        assert(utils::in_bounds(f, states_by_f_and_h));
        assert(utils::in_bounds(h, states_by_f_and_h[f]));
        Bucket &bucket = states_by_f_and_h[f][h];
        if (bucket.empty())
            ++bucket_count;
        bucket.push_back(state);
    }
    vector<Bucket> buckets;
    buckets.reserve(bucket_count);
    // Walk f (outer) and h (inner) either upwards or downwards, depending
    // on the configured preferences; f_incr/h_incr encode the direction.
    int f_init = (f_start == HighLow::HIGH ? max_f : 0);
    int f_end = (f_start == HighLow::HIGH ? 0 : max_f);
    int f_incr = (f_init > f_end ? -1 : 1);
    for (int f = f_init; f != f_end + f_incr; f += f_incr) {
        int h_init = (h_start == HighLow::HIGH ? states_by_f_and_h[f].size() - 1 : 0);
        int h_end = (h_start == HighLow::HIGH ? 0 : states_by_f_and_h[f].size() - 1);
        int h_incr = (h_init > h_end ? -1 : 1);
        for (int h = h_init; h != h_end + h_incr; h += h_incr) {
            Bucket &bucket = states_by_f_and_h[f][h];
            if (!bucket.empty()) {
                buckets.push_back(Bucket());
                buckets.back().swap(bucket);
            }
        }
    }
    assert(static_cast<int>(buckets.size()) == bucket_count);
    return buckets;
}
// Name of this shrink strategy, used for logging.
string ShrinkFH::name() const {
    return "f-preserving";
}
// Log the two configured shrink preferences (f and h ordering).
void ShrinkFH::dump_strategy_specific_options() const {
    utils::g_log << "Prefer shrinking high or low f states: "
                 << (f_start == HighLow::HIGH ? "high" : "low") << endl
                 << "Prefer shrinking high or low h states: "
                 << (h_start == HighLow::HIGH ? "high" : "low") << endl;
}
// Option parser plugin entry point for the "shrink_fh" strategy.
// Fixes two typos in the user-facing documentation string:
// "merge startegy" -> "merge strategy" and duplicated "called called".
static shared_ptr<ShrinkStrategy> _parse(OptionParser &parser) {
    parser.document_synopsis(
        "f-preserving shrink strategy",
        "This shrink strategy implements the algorithm described in"
        " the paper:" + utils::format_conference_reference(
            {"Malte Helmert", "Patrik Haslum", "Joerg Hoffmann"},
            "Flexible Abstraction Heuristics for Optimal Sequential Planning",
            "https://ai.dmi.unibas.ch/papers/helmert-et-al-icaps2007.pdf",
            "Proceedings of the Seventeenth International Conference on"
            " Automated Planning and Scheduling (ICAPS 2007)",
            "176-183",
            "AAAI Press",
            "2007"));
    parser.document_note(
        "shrink_fh()",
        "Combine this with the merge-and-shrink option max_states=N (where N "
        "is a numerical parameter for which sensible values include 1000, "
        "10000, 50000, 100000 and 200000) and the linear merge strategy "
        "cg_goal_level to obtain the variant 'f-preserving shrinking of "
        "transition systems', called HHH in the IJCAI 2011 paper, see "
        "bisimulation based shrink strategy. "
        "When we last ran experiments on interaction of shrink strategies "
        "with label reduction, this strategy performed best when used with "
        "label reduction before merging (and no label reduction before "
        "shrinking). "
        "We also recommend using full pruning with this shrink strategy, "
        "because both distances from the initial state and to the goal states "
        "must be computed anyway, and because the existence of only one "
        "dead state causes this shrink strategy to always use the map-based "
        "approach for partitioning states rather than the more efficient "
        "vector-based approach.");
    ShrinkBucketBased::add_options_to_parser(parser);
    vector<string> high_low;
    high_low.push_back("HIGH");
    high_low.push_back("LOW");
    parser.add_enum_option<ShrinkFH::HighLow>(
        "shrink_f", high_low,
        "prefer shrinking states with high or low f values",
        "HIGH");
    parser.add_enum_option<ShrinkFH::HighLow>(
        "shrink_h", high_low,
        "prefer shrinking states with high or low h values",
        "LOW");
    Options opts = parser.parse();
    // In help mode or during a dry run we only document/validate options.
    if (parser.help_mode())
        return nullptr;
    if (parser.dry_run())
        return nullptr;
    else
        return make_shared<ShrinkFH>(opts);
}
static Plugin<ShrinkStrategy> _plugin("shrink_fh", _parse);
}
| 9,052 |
C++
| 35.651822 | 86 | 0.595449 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree_factory_linear.h
|
#ifndef MERGE_AND_SHRINK_MERGE_TREE_FACTORY_LINEAR_H
#define MERGE_AND_SHRINK_MERGE_TREE_FACTORY_LINEAR_H
#include "merge_tree_factory.h"
#include "../task_utils/variable_order_finder.h"
namespace merge_and_shrink {
// Factory for linear merge trees: the variable order that defines the
// linear merge sequence is determined by a VariableOrderFinder.
class MergeTreeFactoryLinear : public MergeTreeFactory {
    variable_order_finder::VariableOrderType variable_order_type;
protected:
    virtual std::string name() const override;
    virtual void dump_tree_specific_options() const override;
public:
    explicit MergeTreeFactoryLinear(const options::Options &options);
    virtual ~MergeTreeFactoryLinear() override = default;
    // Build a merge tree covering all variables of the task.
    virtual std::unique_ptr<MergeTree> compute_merge_tree(
        const TaskProxy &task_proxy) override;
    // Build a merge tree restricted to the given subset of factor indices.
    virtual std::unique_ptr<MergeTree> compute_merge_tree(
        const TaskProxy &task_proxy,
        const FactoredTransitionSystem &fts,
        const std::vector<int> &indices_subset) override;
    // Linear orders are computed from the task structure alone, so neither
    // init nor goal distances are needed.
    virtual bool requires_init_distances() const override {
        return false;
    }
    virtual bool requires_goal_distances() const override {
        return false;
    }
    static void add_options_to_parser(options::OptionParser &parser);
};
}
#endif
| 1,165 |
C
| 30.513513 | 69 | 0.727897 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_dfp.cc
|
#include "merge_scoring_function_dfp.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "label_equivalence_relation.h"
#include "labels.h"
#include "transition_system.h"
#include "../options/option_parser.h"
#include "../options/plugin.h"
#include "../utils/markup.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
/*
  Compute a rank for every label with respect to the transition system at
  the given index: the minimum goal distance over the targets of the
  label's transitions. Irrelevant (and inactive/reduced) labels get -1;
  relevant labels with no transitions keep the rank INF.
*/
vector<int> MergeScoringFunctionDFP::compute_label_ranks(
    const FactoredTransitionSystem &fts, int index) const {
    const TransitionSystem &ts = fts.get_transition_system(index);
    const Distances &distances = fts.get_distances(index);
    assert(distances.are_goal_distances_computed());
    int num_labels = fts.get_labels().get_size();
    // Irrelevant (and inactive, i.e. reduced) labels have a dummy rank of -1
    vector<int> label_ranks(num_labels, -1);
    for (GroupAndTransitions gat : ts) {
        const LabelGroup &label_group = gat.label_group;
        const vector<Transition> &transitions = gat.transitions;
        // Relevant labels with no transitions have a rank of infinity.
        int label_rank = INF;
        bool group_relevant = false;
        if (static_cast<int>(transitions.size()) == ts.get_size()) {
            /*
              A label group is irrelevant in the earlier notion if it has
              exactly a self loop transition for every state.
            */
            for (const Transition &transition : transitions) {
                if (transition.target != transition.src) {
                    group_relevant = true;
                    break;
                }
            }
        } else {
            group_relevant = true;
        }
        if (!group_relevant) {
            label_rank = -1;
        } else {
            // Rank = minimum goal distance over all transition targets.
            for (const Transition &transition : transitions) {
                label_rank = min(label_rank,
                                 distances.get_goal_distance(transition.target));
            }
        }
        // All labels of a group share the group's transitions and hence rank.
        for (int label_no : label_group) {
            label_ranks[label_no] = label_rank;
        }
    }
    return label_ranks;
}
/*
  Compute the DFP score for every merge candidate: the minimum over all
  labels relevant in both factors of the maximum of the label's two ranks.
  Lower scores are better; candidates with no shared relevant label score
  INF. Label ranks are computed lazily and cached per transition system.
*/
vector<double> MergeScoringFunctionDFP::compute_scores(
    const FactoredTransitionSystem &fts,
    const vector<pair<int, int>> &merge_candidates) {
    int num_ts = fts.get_size();
    vector<vector<int>> transition_system_label_ranks(num_ts);
    vector<double> scores;
    scores.reserve(merge_candidates.size());
    // Go over all pairs of transition systems and compute their weight.
    for (pair<int, int> merge_candidate : merge_candidates) {
        int ts_index1 = merge_candidate.first;
        int ts_index2 = merge_candidate.second;
        // Lazily fill the rank cache for both factors.
        vector<int> &label_ranks1 = transition_system_label_ranks[ts_index1];
        if (label_ranks1.empty()) {
            label_ranks1 = compute_label_ranks(fts, ts_index1);
        }
        vector<int> &label_ranks2 = transition_system_label_ranks[ts_index2];
        if (label_ranks2.empty()) {
            label_ranks2 = compute_label_ranks(fts, ts_index2);
        }
        assert(label_ranks1.size() == label_ranks2.size());
        // Compute the weight associated with this pair
        int pair_weight = INF;
        for (size_t i = 0; i < label_ranks1.size(); ++i) {
            if (label_ranks1[i] != -1 && label_ranks2[i] != -1) {
                // label is relevant in both transition_systems
                int max_label_rank = max(label_ranks1[i], label_ranks2[i]);
                pair_weight = min(pair_weight, max_label_rank);
            }
        }
        scores.push_back(pair_weight);
    }
    return scores;
}
// Name of this scoring function, used for logging.
string MergeScoringFunctionDFP::name() const {
    return "dfp";
}
// Option parser plugin entry point for the "dfp" scoring function.
// Fixes a typo in the user-facing synopsis: "descrdibed" -> "described".
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
    parser.document_synopsis(
        "DFP scoring",
        "This scoring function computes the 'DFP' score as described in the "
        "paper \"Directed model checking with distance-preserving abstractions\" "
        "by Draeger, Finkbeiner and Podelski (SPIN 2006), adapted to planning in "
        "the following paper:" + utils::format_conference_reference(
            {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
            "Generalized Label Reduction for Merge-and-Shrink Heuristics",
            "https://ai.dmi.unibas.ch/papers/sievers-et-al-aaai2014.pdf",
            "Proceedings of the 28th AAAI Conference on Artificial"
            " Intelligence (AAAI 2014)",
            "2358-2366",
            "AAAI Press",
            "2014"));
    parser.document_note(
        "Note",
        "To obtain the configurations called DFP-B-50K described in the paper, "
        "use the following configuration of the merge-and-shrink heuristic "
        "and adapt the tie-breaking criteria of {{{total_order}}} as desired:\n "
        "{{{\nmerge_and_shrink(merge_strategy=merge_stateless(merge_selector="
        "score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order("
        "atomic_ts_order=reverse_level,product_ts_order=new_to_old,"
        "atomic_before_product=true)])),shrink_strategy=shrink_bisimulation("
        "greedy=false),label_reduction=exact(before_shrinking=true,"
        "before_merging=false),max_states=50000,threshold_before_merge=1)\n}}}");
    // During a dry run we only document/validate options.
    if (parser.dry_run())
        return nullptr;
    else
        return make_shared<MergeScoringFunctionDFP>();
}
static options::Plugin<MergeScoringFunction> _plugin("dfp", _parse);
}
| 5,425 |
C++
| 37.482269 | 82 | 0.619724 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/label_reduction.h
|
#ifndef MERGE_AND_SHRINK_LABEL_REDUCTION_H
#define MERGE_AND_SHRINK_LABEL_REDUCTION_H
#include <memory>
#include <vector>
class TaskProxy;
namespace equivalence_relation {
class EquivalenceRelation;
}
namespace options {
class Options;
}
namespace utils {
class RandomNumberGenerator;
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
/*
two_transition_systems: compute the 'combinable relation'
for labels only for the two transition_systems that will
be merged next and reduce labels.
all_transition_systems: compute the 'combinable relation'
for labels once for every transition_system and reduce
labels.
all_transition_systems_with_fixpoint: keep computing the
'combinable relation' for labels iteratively for all
transition systems until no more labels can be reduced.
*/
// Which transition systems the combinable relation is computed for;
// see the explanatory comment above for the meaning of each value.
enum class LabelReductionMethod {
    TWO_TRANSITION_SYSTEMS,
    ALL_TRANSITION_SYSTEMS,
    ALL_TRANSITION_SYSTEMS_WITH_FIXPOINT
};
/*
Order in which iterations of label reduction considers the set of all
transition systems. Regular is the fast downward order plus appending
new composite transition systems after the atomic ones, reverse is the
reversed regular order and random is a random one. All orders are
precomputed and reused for every call to reduce().
*/
// Iteration order over the transition systems (see comment above):
// the Fast Downward order (REGULAR), its reverse, or a random order.
enum class LabelReductionSystemOrder {
    REGULAR,
    REVERSE,
    RANDOM
};
// Exact generalized label reduction for merge-and-shrink, configured via
// the options listed below and applied through reduce().
class LabelReduction {
    // Options for label reduction
    // Precomputed iteration order over transition systems (see
    // LabelReductionSystemOrder above); reused for every call to reduce().
    std::vector<int> transition_system_order;
    bool lr_before_shrinking;
    bool lr_before_merging;
    LabelReductionMethod lr_method;
    LabelReductionSystemOrder lr_system_order;
    // Used when lr_system_order == RANDOM.
    std::shared_ptr<utils::RandomNumberGenerator> rng;
    // True once initialize() has been called (the precomputed order exists).
    bool initialized() const;
    /* Apply the given label equivalence relation to the set of labels and
       compute the resulting label mapping. */
    void compute_label_mapping(
        const equivalence_relation::EquivalenceRelation *relation,
        const FactoredTransitionSystem &fts,
        std::vector<std::pair<int, std::vector<int>>> &label_mapping,
        utils::Verbosity verbosity) const;
    // Compute the 'combinable relation' of labels with respect to all
    // transition systems other than the one at ts_index. Caller owns the
    // returned relation.
    equivalence_relation::EquivalenceRelation
    *compute_combinable_equivalence_relation(
        int ts_index,
        const FactoredTransitionSystem &fts) const;
public:
    explicit LabelReduction(const options::Options &options);
    // Precompute the transition system order for the given task.
    void initialize(const TaskProxy &task_proxy);
    // Perform label reduction for the given upcoming merge; returns whether
    // any labels were reduced.
    bool reduce(
        const std::pair<int, int> &next_merge,
        FactoredTransitionSystem &fts,
        utils::Verbosity verbosity) const;
    void dump_options() const;
    bool reduce_before_shrinking() const {
        return lr_before_shrinking;
    }
    bool reduce_before_merging() const {
        return lr_before_merging;
    }
};
}
#endif
| 2,749 |
C
| 27.947368 | 74 | 0.73554 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree.cc
|
#include "merge_tree.h"
#include "../utils/logging.h"
#include "../utils/rng.h"
#include "../utils/system.h"
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
const int UNINITIALIZED = -1;
// Deep-copy the subtree rooted at other. As documented in the header,
// parent pointers are deliberately left null (copies are only used for
// traversal via child pointers).
MergeTreeNode::MergeTreeNode(const MergeTreeNode &other)
    : parent(nullptr),
      left_child(nullptr),
      right_child(nullptr),
      ts_index(other.ts_index) {
    if (other.left_child) {
        left_child = new MergeTreeNode(*other.left_child);
    }
    if (other.right_child) {
        right_child = new MergeTreeNode(*other.right_child);
    }
}
// Construct a leaf node for the given transition system index.
MergeTreeNode::MergeTreeNode(int ts_index)
    : parent(nullptr),
      left_child(nullptr),
      right_child(nullptr),
      ts_index(ts_index) {
}
// Construct an internal node taking ownership of both subtrees and wiring
// up their parent pointers. ts_index stays UNINITIALIZED until the merge
// it represents is performed.
MergeTreeNode::MergeTreeNode(
    MergeTreeNode *left_child,
    MergeTreeNode *right_child)
    : parent(nullptr),
      left_child(left_child),
      right_child(right_child),
      ts_index(UNINITIALIZED) {
    left_child->parent = this;
    right_child->parent = this;
}
// Recursively delete the owned subtrees (deleting nullptr is a no-op).
MergeTreeNode::~MergeTreeNode() {
    delete left_child;
    delete right_child;
    left_child = nullptr;
    right_child = nullptr;
}
// Descend to the deepest node whose two children are both leaves, i.e.
// the next merge this (sub)tree prescribes.
MergeTreeNode *MergeTreeNode::get_left_most_sibling() {
    if (has_two_leaf_children())
        return this;
    assert(left_child && right_child);
    // Follow the left subtree if it is internal, otherwise the right one.
    MergeTreeNode *subtree = left_child->is_leaf() ? right_child : left_child;
    assert(!subtree->is_leaf());
    return subtree->get_left_most_sibling();
}
// Collapse a merge of two leaf children: record their indices, delete
// them, and turn this node into a leaf for the merged transition system.
// Returns the (left, right) indices of the erased children.
pair<int, int> MergeTreeNode::erase_children_and_set_index(int new_index) {
    assert(has_two_leaf_children());
    pair<int, int> child_indices(left_child->ts_index, right_child->ts_index);
    assert(child_indices.first != UNINITIALIZED);
    assert(child_indices.second != UNINITIALIZED);
    delete left_child;
    delete right_child;
    left_child = nullptr;
    right_child = nullptr;
    assert(ts_index == UNINITIALIZED);
    ts_index = new_index;
    return child_indices;
}
// Find the node whose (leaf) child carries the given index, or nullptr if
// the index does not occur in this subtree. Direct leaf children are
// checked before recursing, and the left subtree before the right one.
MergeTreeNode *MergeTreeNode::get_parent_of_ts_index(int index) {
    bool left_match =
        left_child && left_child->is_leaf() && left_child->ts_index == index;
    bool right_match =
        right_child && right_child->is_leaf() && right_child->ts_index == index;
    if (left_match || right_match) {
        return this;
    }
    MergeTreeNode *found =
        left_child ? left_child->get_parent_of_ts_index(index) : nullptr;
    if (!found && right_child) {
        found = right_child->get_parent_of_ts_index(index);
    }
    return found;
}
// Count the internal (non-leaf) nodes contained in this subtree.
int MergeTreeNode::compute_num_internal_nodes() const {
    if (is_leaf()) {
        return 0;
    }
    int count = 1; // this node is internal
    if (left_child) {
        count += left_child->compute_num_internal_nodes();
    }
    if (right_child) {
        count += right_child->compute_num_internal_nodes();
    }
    return count;
}
/*
  Print the subtree sideways (rotated 90 degrees): right subtree first,
  then this node's index at the current indentation, then the left
  subtree; each level is indented by `offset` additional spaces.
*/
void MergeTreeNode::inorder(int offset, int current_indentation) const {
    if (right_child) {
        right_child->inorder(offset, current_indentation + offset);
    }
    int remaining = current_indentation;
    while (remaining-- > 0) {
        utils::g_log << " ";
    }
    utils::g_log << ts_index << endl;
    if (left_child) {
        left_child->inorder(offset, current_indentation + offset);
    }
}
// Take ownership of the given tree; rng/update_option control how ties
// are resolved when the tree is updated with external merge decisions.
MergeTree::MergeTree(
    MergeTreeNode *root,
    const shared_ptr<utils::RandomNumberGenerator> &rng,
    UpdateOption update_option)
    : root(root), rng(rng), update_option(update_option) {
}

// Recursively deletes the whole tree via the node destructor.
MergeTree::~MergeTree() {
    delete root;
    root = nullptr;
}

/*
  Return (and remove from the tree) the next merge: the children of the
  left-most node that has two leaf children. That node itself becomes a
  leaf representing new_index.
*/
pair<int, int> MergeTree::get_next_merge(int new_index) {
    MergeTreeNode *next_merge = root->get_left_most_sibling();
    return next_merge->erase_children_and_set_index(new_index);
}
/*
  Return the parents (in this tree) of the leaves carrying the two given
  transition system indices. To decide which parent is paired with which
  index, the merge order encoded by the tree is simulated on a throw-away
  copy: whichever index would take part in a merge first determines the
  pairing of the result.
*/
pair<MergeTreeNode *, MergeTreeNode *> MergeTree::get_parents_of_ts_indices(
    const pair<int, int> &ts_indices, int new_index) {
    int ts_index1 = ts_indices.first;
    int ts_index2 = ts_indices.second;
    bool use_first_index_for_first_parent = true;
    // Copy the tree and ask it for next merges until found both indices.
    MergeTreeNode *copy = new MergeTreeNode(*root);
    int found_indices = 0;
    while (!copy->is_leaf()) {
        MergeTreeNode *next_merge = copy->get_left_most_sibling();
        pair<int, int> merge = next_merge->erase_children_and_set_index(new_index);
        if (merge.first == ts_index1 || merge.second == ts_index1) {
            ++found_indices;
        }
        if (merge.first == ts_index2 || merge.second == ts_index2) {
            // ts_index2 is merged before ts_index1 was seen, so it must
            // be paired with the first parent below.
            if (!found_indices) {
                use_first_index_for_first_parent = false;
            }
            ++found_indices;
        }
        if (found_indices == 2) {
            break;
        }
    }
    delete copy;
    pair<MergeTreeNode *, MergeTreeNode *> result = make_pair(nullptr, nullptr);
    if (use_first_index_for_first_parent) {
        result.first = root->get_parent_of_ts_index(ts_index1);
        result.second = root->get_parent_of_ts_index(ts_index2);
    } else {
        result.first = root->get_parent_of_ts_index(ts_index2);
        result.second = root->get_parent_of_ts_index(ts_index1);
    }
    return result;
}
/*
  Update the tree to reflect an externally chosen merge of ts_index1 and
  ts_index2 into new_index. If the tree already contains exactly this
  merge, the corresponding node is simply collapsed. Otherwise the two
  indices sit under different parents: one parent "survives" (selected
  according to update_option) and keeps the merged index, while the other
  parent is removed from the tree and its unaffected child is re-attached
  in its place.
*/
void MergeTree::update(pair<int, int> merge, int new_index) {
    int ts_index1 = merge.first;
    int ts_index2 = merge.second;
    assert(root->ts_index != ts_index1 && root->ts_index != ts_index2);
    pair<MergeTreeNode *, MergeTreeNode *> parents =
        get_parents_of_ts_indices(merge, new_index);
    MergeTreeNode *first_parent = parents.first;
    MergeTreeNode *second_parent = parents.second;
    if (first_parent == second_parent) { // given merge already in the tree
        first_parent->erase_children_and_set_index(new_index);
    } else {
        MergeTreeNode *surviving_node = nullptr;
        MergeTreeNode *removed_node = nullptr;
        if (update_option == UpdateOption::USE_FIRST) {
            surviving_node = first_parent;
            removed_node = second_parent;
        } else if (update_option == UpdateOption::USE_SECOND) {
            surviving_node = second_parent;
            removed_node = first_parent;
        } else if (update_option == UpdateOption::USE_RANDOM) {
            int random = (*rng)(2);
            surviving_node = (random == 0 ? first_parent : second_parent);
            removed_node = (random == 0 ? second_parent : first_parent);
        } else {
            ABORT("Unknown merge tree update option");
        }
        // Update the leaf node corresponding to one of the indices to
        // correspond to the merged index.
        MergeTreeNode *surviving_leaf = nullptr;
        if (surviving_node->left_child->ts_index == ts_index1 ||
            surviving_node->left_child->ts_index == ts_index2) {
            surviving_leaf = surviving_node->left_child;
        } else {
            assert(surviving_node->right_child->ts_index == ts_index1 ||
                   surviving_node->right_child->ts_index == ts_index2);
            surviving_leaf = surviving_node->right_child;
        }
        surviving_leaf->ts_index = new_index;
        // Remove all links to removed_node and store pointers to the
        // relevant children and its parent.
        MergeTreeNode *parent_of_removed_node = removed_node->parent;
        if (parent_of_removed_node) {
            // parent_of_removed_node can be nullptr if removed_node
            // is the root node
            if (parent_of_removed_node->left_child == removed_node) {
                parent_of_removed_node->left_child = nullptr;
            } else {
                assert(parent_of_removed_node->right_child == removed_node);
                parent_of_removed_node->right_child = nullptr;
            }
        }
        MergeTreeNode *surviving_child_of_removed_node = nullptr;
        /*
          Find the child of remove_node that should survive (i.e. the node that
          does not correspond to the merged indices) and set it to null so that
          deleting removed_node later does not delete (via destructor) the
          surviving child.
        */
        if (removed_node->left_child->ts_index == ts_index1 ||
            removed_node->left_child->ts_index == ts_index2) {
            surviving_child_of_removed_node = removed_node->right_child;
            removed_node->right_child = nullptr;
        } else {
            assert(removed_node->right_child->ts_index == ts_index1 ||
                   removed_node->right_child->ts_index == ts_index2);
            surviving_child_of_removed_node = removed_node->left_child;
            removed_node->left_child = nullptr;
        }
        if (removed_node == root) {
            root = surviving_child_of_removed_node;
        }
        // Finally delete removed_node (this also deletes its child
        //corresponding to one of the merged indices, but not the other one).
        delete removed_node;
        removed_node = nullptr;
        // Update pointers of the surviving child of removed_node and the
        // parent of removed_node (if existing) to point to each other.
        surviving_child_of_removed_node->parent = parent_of_removed_node;
        if (parent_of_removed_node) {
            // parent_of_removed_node can be nullptr if removed_node
            // was the root node
            if (!parent_of_removed_node->left_child) {
                parent_of_removed_node->left_child = surviving_child_of_removed_node;
            } else {
                assert(!parent_of_removed_node->right_child);
                parent_of_removed_node->right_child = surviving_child_of_removed_node;
            }
        }
    }
}
// Log the whole tree sideways, starting the recursion at the root with
// zero indentation.
void MergeTree::inorder_traversal(int indentation_offset) const {
    utils::g_log << "Merge tree, read from left to right (90° rotated tree): "
                 << endl;
    root->inorder(indentation_offset, 0);
}
}
| 10,006 |
C++
| 33.270548 | 86 | 0.608235 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory.cc
|
#include "merge_strategy_factory.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include <iostream>
using namespace std;
namespace merge_and_shrink {
// Log the strategy's name followed by its strategy-specific options.
void MergeStrategyFactory::dump_options() const {
    utils::g_log << "Merge strategy options:" << endl;
    utils::g_log << "Type: " << name() << endl;
    dump_strategy_specific_options();
}

// Register the MergeStrategy plugin type so concrete merge strategies
// share a common documentation page.
static options::PluginTypePlugin<MergeStrategyFactory> _type_plugin(
    "MergeStrategy",
    "This page describes the various merge strategies supported "
    "by the planner.");
}
| 548 |
C++
| 22.869564 | 68 | 0.693431 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_precomputed.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_PRECOMPUTED_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_PRECOMPUTED_H
#include "merge_strategy.h"
#include <memory>
namespace merge_and_shrink {
class MergeTree;
/*
  Merge strategy that follows a fixed, precomputed merge tree: each call
  to get_next() reads the next merge off the tree, independent of the
  current state of the factored transition system.
*/
class MergeStrategyPrecomputed : public MergeStrategy {
    std::unique_ptr<MergeTree> merge_tree;
public:
    MergeStrategyPrecomputed(
        const FactoredTransitionSystem &fts,
        std::unique_ptr<MergeTree> merge_tree);
    virtual ~MergeStrategyPrecomputed() override = default;
    // Return the next pair of factor indices to merge, as dictated by
    // the precomputed merge tree.
    virtual std::pair<int, int> get_next() override;
};
}
#endif
| 559 |
C
| 24.454544 | 59 | 0.745975 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/labels.h
|
#ifndef MERGE_AND_SHRINK_LABELS_H
#define MERGE_AND_SHRINK_LABELS_H
#include <memory>
#include <vector>
namespace merge_and_shrink {
class Label {
    /*
      This class implements labels as used by merge-and-shrink transition systems.
      Labels are opaque tokens that have an associated cost.
    */
    int cost;
public:
    explicit Label(int cost_)
        : cost(cost_) {
    }
    ~Label() {}
    // Cost associated with this label.
    int get_cost() const {
        return cost;
    }
};
/*
This class serves both as a container class to handle the set of all labels
and to perform label reduction on this set.
*/
class Labels {
    // All labels ever created; entries may become inactive after label
    // reduction (see is_current_label in the implementation file).
    std::vector<std::unique_ptr<Label>> labels;
    int max_size; // the maximum number of labels that can be created
public:
    explicit Labels(std::vector<std::unique_ptr<Label>> &&labels);
    // Perform a label reduction on the labels with the given numbers
    // (semantics defined in the implementation file).
    void reduce_labels(const std::vector<int> &old_label_nos);
    // True iff the label has not been replaced by a reduction.
    bool is_current_label(int label_no) const;
    int get_label_cost(int label_no) const;
    void dump_labels() const;
    // Total number of label slots, including reduced ones.
    int get_size() const {
        return labels.size();
    }
    int get_max_size() const {
        return max_size;
    }
};
}
#endif
| 1,123 |
C
| 22.914893 | 82 | 0.647373 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/distances.h
|
#ifndef MERGE_AND_SHRINK_DISTANCES_H
#define MERGE_AND_SHRINK_DISTANCES_H
#include "types.h"
#include <cassert>
#include <vector>
/*
TODO: Possible interface improvements for this class:
- Check TODOs in implementation file.
(Many of these would need performance tests, as distance computation
can be one of the bottlenecks in our code.)
*/
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class TransitionSystem;
/*
  Computes and caches abstract init distances (distance from the initial
  state) and goal distances (distance to the nearest goal state) for a
  merge-and-shrink transition system.
*/
class Distances {
    static const int DISTANCE_UNKNOWN = -1;
    const TransitionSystem &transition_system;
    std::vector<int> init_distances;
    std::vector<int> goal_distances;
    bool init_distances_computed;
    bool goal_distances_computed;
    // Drop all cached distance information and mark it as not computed.
    void clear_distances();
    int get_num_states() const;
    // True iff every label group has cost 1 (allows BFS instead of Dijkstra).
    bool is_unit_cost() const;
    void compute_init_distances_unit_cost();
    void compute_goal_distances_unit_cost();
    void compute_init_distances_general_cost();
    void compute_goal_distances_general_cost();
public:
    explicit Distances(const TransitionSystem &transition_system);
    ~Distances() = default;
    bool are_init_distances_computed() const {
        return init_distances_computed;
    }
    bool are_goal_distances_computed() const {
        return goal_distances_computed;
    }
    // Compute init and/or goal distances; at least one flag must be set.
    void compute_distances(
        bool compute_init_distances,
        bool compute_goal_distances,
        utils::Verbosity verbosity);
    /*
      Update distances according to the given abstraction. If the abstraction
      is not f-preserving, distances are directly recomputed.
      It is OK for the abstraction to drop states, but then all
      dropped states must be unreachable or irrelevant. (Otherwise,
      the method might fail to detect that the distance information is
      out of date.)
    */
    void apply_abstraction(
        const StateEquivalenceRelation &state_equivalence_relation,
        bool compute_init_distances,
        bool compute_goal_distances,
        utils::Verbosity verbosity);
    int get_init_distance(int state) const {
        assert(are_init_distances_computed());
        return init_distances[state];
    }
    int get_goal_distance(int state) const {
        assert(are_goal_distances_computed());
        return goal_distances[state];
    }
    void dump() const;
    void statistics() const;
};
}
#endif
| 2,338 |
C
| 25.579545 | 77 | 0.691617 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_selector_score_based_filtering.cc
|
#include "merge_selector_score_based_filtering.h"
#include "factored_transition_system.h"
#include "merge_scoring_function.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
// Read the ordered list of scoring functions from the parsed options.
MergeSelectorScoreBasedFiltering::MergeSelectorScoreBasedFiltering(
    const options::Options &options)
    : merge_scoring_functions(
          options.get_list<shared_ptr<MergeScoringFunction>>(
              "scoring_functions")) {
}
/*
  Keep exactly those merge candidates whose score equals the minimum of
  the given scores (lower scores are considered better).
*/
vector<pair<int, int>> MergeSelectorScoreBasedFiltering::get_remaining_candidates(
    const vector<pair<int, int>> &merge_candidates,
    const vector<double> &scores) const {
    assert(merge_candidates.size() == scores.size());
    double minimum_score = INF;
    for (size_t i = 0; i < scores.size(); ++i) {
        if (scores[i] < minimum_score) {
            minimum_score = scores[i];
        }
    }
    vector<pair<int, int>> best_candidates;
    for (size_t i = 0; i < scores.size(); ++i) {
        if (scores[i] == minimum_score) {
            best_candidates.push_back(merge_candidates[i]);
        }
    }
    return best_candidates;
}
/*
  Apply the scoring functions one after the other, each time keeping only
  the best-scored candidates, until a single candidate remains. Exits
  with a critical error if the final scoring function does not break all
  remaining ties.
*/
pair<int, int> MergeSelectorScoreBasedFiltering::select_merge(
    const FactoredTransitionSystem &fts,
    const vector<int> &indices_subset) const {
    vector<pair<int, int>> merge_candidates =
        compute_merge_candidates(fts, indices_subset);
    for (const shared_ptr<MergeScoringFunction> &scoring_function :
         merge_scoring_functions) {
        vector<double> scores = scoring_function->compute_scores(
            fts, merge_candidates);
        merge_candidates = get_remaining_candidates(merge_candidates, scores);
        if (merge_candidates.size() == 1) {
            break;
        }
    }
    if (merge_candidates.size() > 1) {
        cerr << "More than one merge candidate remained after computing all "
            "scores! Did you forget to include a uniquely tie-breaking "
            "scoring function, e.g. total_order or single_random?" << endl;
        utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
    }
    return merge_candidates.front();
}
void MergeSelectorScoreBasedFiltering::initialize(const TaskProxy &task_proxy) {
for (shared_ptr<MergeScoringFunction> &scoring_function
: merge_scoring_functions) {
scoring_function->initialize(task_proxy);
}
}
string MergeSelectorScoreBasedFiltering::name() const {
    return "score based filtering";
}

// Log the options of every configured scoring function.
void MergeSelectorScoreBasedFiltering::dump_specific_options() const {
    for (const shared_ptr<MergeScoringFunction> &scoring_function
         : merge_scoring_functions) {
        scoring_function->dump_options();
    }
}
bool MergeSelectorScoreBasedFiltering::requires_init_distances() const {
for (const shared_ptr<MergeScoringFunction> &scoring_function
: merge_scoring_functions) {
if (scoring_function->requires_init_distances()) {
return true;
}
}
return false;
}
bool MergeSelectorScoreBasedFiltering::requires_goal_distances() const {
for (const shared_ptr<MergeScoringFunction> &scoring_function
: merge_scoring_functions) {
if (scoring_function->requires_goal_distances()) {
return true;
}
}
return false;
}
// Plugin glue: document and parse the selector's options; construct the
// selector unless this is a dry run.
static shared_ptr<MergeSelector>_parse(options::OptionParser &parser) {
    parser.document_synopsis(
        "Score based filtering merge selector",
        "This merge selector has a list of scoring functions, which are used "
        "iteratively to compute scores for merge candidates, keeping the best "
        "ones (with minimal scores) until only one is left.");
    parser.add_list_option<shared_ptr<MergeScoringFunction>>(
        "scoring_functions",
        "The list of scoring functions used to compute scores for candidates.");
    options::Options opts = parser.parse();
    if (parser.dry_run())
        return nullptr;
    else
        return make_shared<MergeSelectorScoreBasedFiltering>(opts);
}

static options::Plugin<MergeSelector> _plugin("score_based_filtering", _parse);
}
| 4,082 |
C++
| 31.664 | 82 | 0.669035 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_precomputed.cc
|
#include "merge_strategy_factory_precomputed.h"
#include "merge_strategy_precomputed.h"
#include "merge_tree_factory.h"
#include "merge_tree.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/memory.h"
using namespace std;
namespace merge_and_shrink {
MergeStrategyFactoryPrecomputed::MergeStrategyFactoryPrecomputed(
    options::Options &options)
    : merge_tree_factory(options.get<shared_ptr<MergeTreeFactory>>("merge_tree")) {
}

// Let the merge tree factory compute a concrete merge tree for the task
// and wrap it in a precomputed merge strategy.
unique_ptr<MergeStrategy> MergeStrategyFactoryPrecomputed::compute_merge_strategy(
    const TaskProxy &task_proxy,
    const FactoredTransitionSystem &fts) {
    unique_ptr<MergeTree> merge_tree =
        merge_tree_factory->compute_merge_tree(task_proxy);
    return utils::make_unique_ptr<MergeStrategyPrecomputed>(fts, move(merge_tree));
}

// Distance requirements are delegated to the underlying tree factory.
bool MergeStrategyFactoryPrecomputed::requires_init_distances() const {
    return merge_tree_factory->requires_init_distances();
}

bool MergeStrategyFactoryPrecomputed::requires_goal_distances() const {
    return merge_tree_factory->requires_goal_distances();
}

string MergeStrategyFactoryPrecomputed::name() const {
    return "precomputed";
}

void MergeStrategyFactoryPrecomputed::dump_strategy_specific_options() const {
    merge_tree_factory->dump_options();
}
// Plugin glue: document and parse the options of the precomputed merge
// strategy; construct the factory unless this is a dry run.
static shared_ptr<MergeStrategyFactory>_parse(options::OptionParser &parser) {
    parser.document_synopsis(
        "Precomputed merge strategy",
        "This merge strategy has a precomputed merge tree. Note that this "
        "merge strategy does not take into account the current state of "
        "the factored transition system. This also means that this merge "
        "strategy relies on the factored transition system being synchronized "
        "with this merge tree, i.e. all merges are performed exactly as given "
        "by the merge tree.");
    parser.document_note(
        "Note",
        "An example of a precomputed merge startegy is a linear merge strategy, "
        "which can be obtained using:\n"
        "{{{\n"
        "merge_strategy=merge_precomputed(merge_tree=linear(<variable_order>))"
        "\n}}}");
    parser.add_option<shared_ptr<MergeTreeFactory>>(
        "merge_tree",
        "The precomputed merge tree.");
    options::Options opts = parser.parse();
    if (parser.dry_run())
        return nullptr;
    else
        return make_shared<MergeStrategyFactoryPrecomputed>(opts);
}

static options::Plugin<MergeStrategyFactory> _plugin("merge_precomputed", _parse);
}
| 2,546 |
C++
| 33.418918 | 83 | 0.714061 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function.cc
|
#include "merge_scoring_function.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include <iostream>
using namespace std;
namespace merge_and_shrink {
// Scoring functions start uninitialized; initialize(task_proxy) must be
// called before compute_scores (subclasses assert on this flag).
MergeScoringFunction::MergeScoringFunction()
    : initialized(false) {
}

// Log the scoring function's name plus any function-specific options.
void MergeScoringFunction::dump_options() const {
    utils::g_log << "Merge scoring function:" << endl;
    utils::g_log << "Name: " << name() << endl;
    dump_function_specific_options();
}

// Register the MergeScoringFunction plugin type for documentation.
static options::PluginTypePlugin<MergeScoringFunction> _type_plugin(
    "MergeScoringFunction",
    "This page describes various merge scoring functions. A scoring function, "
    "given a list of merge candidates and a factored transition system, "
    "computes a score for each candidate based on this information and "
    "potentially some chosen options. Minimal scores are considered best. "
    "Scoring functions are currently only used within the score based "
    "filtering merge selector.");
}
| 949 |
C++
| 29.64516 | 79 | 0.722866 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/distances.cc
|
#include "distances.h"
#include "label_equivalence_relation.h"
#include "transition_system.h"
#include "../algorithms/priority_queues.h"
#include "../utils/logging.h"
#include <cassert>
#include <deque>
using namespace std;
namespace merge_and_shrink {
// Out-of-class definition for the "distance not yet known" sentinel.
const int Distances::DISTANCE_UNKNOWN;

Distances::Distances(const TransitionSystem &transition_system)
    : transition_system(transition_system) {
    clear_distances();
}

// Invalidate all cached distance information.
void Distances::clear_distances() {
    init_distances_computed = false;
    goal_distances_computed = false;
    init_distances.clear();
    goal_distances.clear();
}

int Distances::get_num_states() const {
    return transition_system.get_size();
}
/*
  Check whether every label group has cost 1, in which case breadth-first
  search suffices for the distance computations.

  TODO: Is this a good implementation? It differs from the
  previous implementation in transition_system.cc because that
  would require access to more attributes. One nice thing about it
  is that it gets at the label cost information in the same way
  that the actual shortest-path algorithms (e.g.
  compute_goal_distances_general_cost) do.
*/
bool Distances::is_unit_cost() const {
    for (GroupAndTransitions gat : transition_system) {
        if (gat.label_group.get_cost() != 1) {
            return false;
        }
    }
    return true;
}
/*
  Standard breadth-first search over an unweighted successor graph.
  `queue` initially holds the source states (whose entries in `distances`
  must already be 0); `distances` is relaxed in place.
*/
static void breadth_first_search(
    const vector<vector<int>> &graph, deque<int> &queue,
    vector<int> &distances) {
    while (!queue.empty()) {
        int current = queue.front();
        queue.pop_front();
        // One more step than the current state; cannot change while we
        // expand this state's successors.
        int candidate_distance = distances[current] + 1;
        for (int successor : graph[current]) {
            if (distances[successor] > candidate_distance) {
                distances[successor] = candidate_distance;
                queue.push_back(successor);
            }
        }
    }
}
void Distances::compute_init_distances_unit_cost() {
vector<vector<int>> forward_graph(get_num_states());
for (GroupAndTransitions gat : transition_system) {
const vector<Transition> &transitions = gat.transitions;
for (const Transition &transition : transitions) {
forward_graph[transition.src].push_back(transition.target);
}
}
deque<int> queue;
queue.push_back(transition_system.get_init_state());
init_distances[transition_system.get_init_state()] = 0;
breadth_first_search(forward_graph, queue, init_distances);
}
void Distances::compute_goal_distances_unit_cost() {
vector<vector<int>> backward_graph(get_num_states());
for (GroupAndTransitions gat : transition_system) {
const vector<Transition> &transitions = gat.transitions;
for (const Transition &transition : transitions) {
backward_graph[transition.target].push_back(transition.src);
}
}
deque<int> queue;
for (int state = 0; state < get_num_states(); ++state) {
if (transition_system.is_goal_state(state)) {
goal_distances[state] = 0;
queue.push_back(state);
}
}
breadth_first_search(backward_graph, queue, goal_distances);
}
/*
  Dijkstra search over a weighted successor graph; graph[state] holds
  (successor, cost) pairs. `distances` must already contain 0 for all
  source states (which must be on `queue`) and INF elsewhere; it is
  relaxed in place to shortest distances.
*/
static void dijkstra_search(
    const vector<vector<pair<int, int>>> &graph,
    priority_queues::AdaptiveQueue<int> &queue,
    vector<int> &distances) {
    while (!queue.empty()) {
        pair<int, int> top_pair = queue.pop();
        int distance = top_pair.first;
        int state = top_pair.second;
        int state_distance = distances[state];
        assert(state_distance <= distance);
        // Skip stale queue entries: a shorter path was found after this
        // entry was pushed.
        if (state_distance < distance)
            continue;
        for (size_t i = 0; i < graph[state].size(); ++i) {
            const pair<int, int> &transition = graph[state][i];
            int successor = transition.first;
            int cost = transition.second;
            int successor_cost = state_distance + cost;
            if (distances[successor] > successor_cost) {
                distances[successor] = successor_cost;
                queue.push(successor_cost, successor);
            }
        }
    }
}
// Dijkstra forward from the initial state to fill init_distances.
void Distances::compute_init_distances_general_cost() {
    vector<vector<pair<int, int>>> forward_graph(get_num_states());
    for (GroupAndTransitions gat : transition_system) {
        const LabelGroup &label_group = gat.label_group;
        const vector<Transition> &transitions = gat.transitions;
        int cost = label_group.get_cost();
        for (const Transition &transition : transitions) {
            forward_graph[transition.src].push_back(
                make_pair(transition.target, cost));
        }
    }

    // TODO: Reuse the same queue for multiple computations to save speed?
    //       Also see compute_goal_distances_general_cost.
    priority_queues::AdaptiveQueue<int> queue;
    init_distances[transition_system.get_init_state()] = 0;
    queue.push(0, transition_system.get_init_state());
    dijkstra_search(forward_graph, queue, init_distances);
}

// Dijkstra backward from all goal states to fill goal_distances.
void Distances::compute_goal_distances_general_cost() {
    vector<vector<pair<int, int>>> backward_graph(get_num_states());
    for (GroupAndTransitions gat : transition_system) {
        const LabelGroup &label_group = gat.label_group;
        const vector<Transition> &transitions = gat.transitions;
        int cost = label_group.get_cost();
        for (const Transition &transition : transitions) {
            backward_graph[transition.target].push_back(
                make_pair(transition.src, cost));
        }
    }

    // TODO: Reuse the same queue for multiple computations to save speed?
    //       Also see compute_init_distances_general_cost.
    priority_queues::AdaptiveQueue<int> queue;
    for (int state = 0; state < get_num_states(); ++state) {
        if (transition_system.is_goal_state(state)) {
            goal_distances[state] = 0;
            queue.push(0, state);
        }
    }
    dijkstra_search(backward_graph, queue, goal_distances);
}
/*
  Compute abstract init and/or goal distances as requested by the two
  flags (at least one must be set), choosing BFS or Dijkstra depending
  on whether the transition system is unit-cost.
*/
void Distances::compute_distances(
    bool compute_init_distances,
    bool compute_goal_distances,
    utils::Verbosity verbosity) {
    assert(compute_init_distances || compute_goal_distances);
    /*
      This method does the following:
      - Computes the distances of abstract states from the abstract
        initial state ("abstract g") and to the abstract goal states
        ("abstract h"), depending on the given flags.
    */

    if (are_init_distances_computed()) {
        /*
          The only scenario where distance information is allowed to be
          present when computing distances is when computing goal distances
          for the final transition system in a setting where only init
          distances have been computed during the merge-and-shrink computation.
        */
        assert(!are_goal_distances_computed());
        assert(goal_distances.empty());
        assert(!compute_init_distances);
        assert(compute_goal_distances);
    } else {
        /*
          Otherwise, when computing distances, the previous (invalid)
          distance information must have been cleared before.
        */
        assert(!are_init_distances_computed() && !are_goal_distances_computed());
        assert(init_distances.empty() && goal_distances.empty());
    }

    if (verbosity >= utils::Verbosity::VERBOSE) {
        utils::g_log << transition_system.tag();
    }
    int num_states = get_num_states();
    if (num_states == 0) {
        if (verbosity >= utils::Verbosity::VERBOSE) {
            utils::g_log << "empty transition system, no distances to compute" << endl;
        }
        init_distances_computed = true;
        goal_distances_computed = true;
        return;
    }

    if (compute_init_distances) {
        init_distances.resize(num_states, INF);
    }
    if (compute_goal_distances) {
        goal_distances.resize(num_states, INF);
    }
    if (verbosity >= utils::Verbosity::VERBOSE) {
        utils::g_log << "computing ";
        if (compute_init_distances && compute_goal_distances) {
            utils::g_log << "init and goal";
        } else if (compute_init_distances) {
            utils::g_log << "init";
        } else if (compute_goal_distances) {
            utils::g_log << "goal";
        }
        utils::g_log << " distances using ";
    }
    if (is_unit_cost()) {
        if (verbosity >= utils::Verbosity::VERBOSE) {
            utils::g_log << "unit-cost";
        }
        if (compute_init_distances) {
            compute_init_distances_unit_cost();
        }
        if (compute_goal_distances) {
            compute_goal_distances_unit_cost();
        }
    } else {
        if (verbosity >= utils::Verbosity::VERBOSE) {
            utils::g_log << "general-cost";
        }
        if (compute_init_distances) {
            compute_init_distances_general_cost();
        }
        if (compute_goal_distances) {
            compute_goal_distances_general_cost();
        }
    }
    if (verbosity >= utils::Verbosity::VERBOSE) {
        utils::g_log << " algorithm" << endl;
    }

    if (compute_init_distances) {
        init_distances_computed = true;
    }
    if (compute_goal_distances) {
        goal_distances_computed = true;
    }
}
/*
  Map the cached distances through the given state equivalence relation.
  If all states within every equivalence class agree on their distances,
  the mapped values are kept; otherwise the abstraction was not
  f-preserving and all requested distances are recomputed from scratch.
*/
void Distances::apply_abstraction(
    const StateEquivalenceRelation &state_equivalence_relation,
    bool compute_init_distances,
    bool compute_goal_distances,
    utils::Verbosity verbosity) {
    if (compute_init_distances) {
        assert(are_init_distances_computed());
        assert(state_equivalence_relation.size() < init_distances.size());
    }
    if (compute_goal_distances) {
        assert(are_goal_distances_computed());
        assert(state_equivalence_relation.size() < goal_distances.size());
    }

    int new_num_states = state_equivalence_relation.size();
    vector<int> new_init_distances;
    vector<int> new_goal_distances;
    if (compute_init_distances) {
        new_init_distances.resize(new_num_states, DISTANCE_UNKNOWN);
    }
    if (compute_goal_distances) {
        new_goal_distances.resize(new_num_states, DISTANCE_UNKNOWN);
    }

    bool must_recompute = false;
    for (int new_state = 0; new_state < new_num_states; ++new_state) {
        const StateEquivalenceClass &state_equivalence_class =
            state_equivalence_relation[new_state];
        assert(!state_equivalence_class.empty());

        // Use the first class member's distances as reference values.
        StateEquivalenceClass::const_iterator pos = state_equivalence_class.begin();
        int new_init_dist = -1;
        int new_goal_dist = -1;
        if (compute_init_distances) {
            new_init_dist = init_distances[*pos];
        }
        if (compute_goal_distances) {
            new_goal_dist = goal_distances[*pos];
        }

        // Any disagreement within the class means the mapping is unusable.
        ++pos;
        for (; pos != state_equivalence_class.end(); ++pos) {
            if (compute_init_distances && init_distances[*pos] != new_init_dist) {
                must_recompute = true;
                break;
            }
            if (compute_goal_distances && goal_distances[*pos] != new_goal_dist) {
                must_recompute = true;
                break;
            }
        }

        if (must_recompute)
            break;
        if (compute_init_distances) {
            new_init_distances[new_state] = new_init_dist;
        }
        if (compute_goal_distances) {
            new_goal_distances[new_state] = new_goal_dist;
        }
    }

    if (must_recompute) {
        if (verbosity >= utils::Verbosity::VERBOSE) {
            utils::g_log << transition_system.tag()
                         << "simplification was not f-preserving!" << endl;
        }
        clear_distances();
        compute_distances(
            compute_init_distances, compute_goal_distances, verbosity);
    } else {
        init_distances = move(new_init_distances);
        goal_distances = move(new_goal_distances);
    }
}
// Log all computed distance information, formatted as
// "index: distance" entries separated by ", ".
void Distances::dump() const {
    if (are_init_distances_computed()) {
        utils::g_log << "Init distances: ";
        for (size_t i = 0; i < init_distances.size(); ++i) {
            utils::g_log << i << ": " << init_distances[i];
            if (i != init_distances.size() - 1) {
                utils::g_log << ", ";
            }
        }
        utils::g_log << endl;
    }
    if (are_goal_distances_computed()) {
        utils::g_log << "Goal distances: ";
        for (size_t i = 0; i < goal_distances.size(); ++i) {
            /*
              Bug fix: the separator used to be printed unconditionally
              here *and* again in the final-element check, producing
              ", , " between entries plus a trailing separator. Print it
              only between entries, mirroring the init-distances loop.
            */
            utils::g_log << i << ": " << goal_distances[i];
            if (i != goal_distances.size() - 1) {
                utils::g_log << ", ";
            }
        }
        utils::g_log << endl;
    }
}
// Log a one-line summary: the initial state's abstract goal distance,
// or a note if goal distances are missing / the system is unsolvable.
void Distances::statistics() const {
    utils::g_log << transition_system.tag();
    if (!are_goal_distances_computed()) {
        utils::g_log << "goal distances not computed";
    } else if (transition_system.is_solvable(*this)) {
        utils::g_log << "init h=" << get_goal_distance(transition_system.get_init_state());
    } else {
        utils::g_log << "transition system is unsolvable";
    }
    utils::g_log << endl;
}
}
| 12,930 |
C++
| 33.299735 | 91 | 0.59652 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_total_order.cc
|
#include "merge_scoring_function_total_order.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "../task_proxy.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
// Read the ordering options and the RNG (seeded via random_seed) from
// the parsed options; the actual order is built in initialize().
MergeScoringFunctionTotalOrder::MergeScoringFunctionTotalOrder(
    const options::Options &options)
    : atomic_ts_order(options.get<AtomicTSOrder>("atomic_ts_order")),
      product_ts_order(options.get<ProductTSOrder>("product_ts_order")),
      atomic_before_product(options.get<bool>("atomic_before_product")),
      random_seed(options.get<int>("random_seed")),
      rng(utils::parse_rng_from_options(options)) {
}
/*
  Score each candidate by its position in the precomputed total order of
  all candidate pairs (lower position = preferred). The lookup is linear
  in the candidate order for each candidate, i.e. quadratic overall.
*/
vector<double> MergeScoringFunctionTotalOrder::compute_scores(
    const FactoredTransitionSystem &,
    const vector<pair<int, int>> &merge_candidates) {
    assert(initialized);
    vector<double> scores;
    scores.reserve(merge_candidates.size());
    for (size_t candidate_index = 0; candidate_index < merge_candidates.size();
         ++candidate_index) {
        pair<int, int> merge_candidate = merge_candidates[candidate_index];
        int ts_index1 = merge_candidate.first;
        int ts_index2 = merge_candidate.second;
        for (size_t merge_candidate_order_index = 0;
             merge_candidate_order_index < merge_candidate_order.size();
             ++merge_candidate_order_index) {
            pair<int, int> other_candidate =
                merge_candidate_order[merge_candidate_order_index];
            // Candidates are unordered pairs, so match both orientations.
            if ((other_candidate.first == ts_index1 &&
                 other_candidate.second == ts_index2) ||
                (other_candidate.second == ts_index1 &&
                 other_candidate.first == ts_index2)) {
                // use the index in the merge candidate order as score
                scores.push_back(merge_candidate_order_index);
                break;
            }
        }
        // We must have inserted a score for the current candidate.
        assert(scores.size() == candidate_index + 1);
    }
    return scores;
}
/*
  Precompute the total order over all merge candidate pairs from the
  configured atomic/product orders. Indices 0..n-1 denote the atomic
  transition systems, indices n..2n-2 the products created later during
  the merge-and-shrink computation.
*/
void MergeScoringFunctionTotalOrder::initialize(const TaskProxy &task_proxy) {
    initialized = true;
    int num_variables = task_proxy.get_variables().size();
    int max_transition_system_count = num_variables * 2 - 1;
    vector<int> transition_system_order;
    transition_system_order.reserve(max_transition_system_count);

    // Compute the order in which atomic transition systems are considered
    vector<int> atomic_tso;
    atomic_tso.reserve(num_variables);
    for (int i = 0; i < num_variables; ++i) {
        atomic_tso.push_back(i);
    }
    if (atomic_ts_order == AtomicTSOrder::LEVEL) {
        reverse(atomic_tso.begin(), atomic_tso.end());
    } else if (atomic_ts_order == AtomicTSOrder::RANDOM) {
        rng->shuffle(atomic_tso);
    }

    // Compute the order in which product transition systems are considered
    vector<int> product_tso;
    for (int i = num_variables; i < max_transition_system_count; ++i) {
        product_tso.push_back(i);
    }
    if (product_ts_order == ProductTSOrder::NEW_TO_OLD) {
        reverse(product_tso.begin(), product_tso.end());
    } else if (product_ts_order == ProductTSOrder::RANDOM) {
        rng->shuffle(product_tso);
    }

    // Put the orders in the correct order
    if (atomic_before_product) {
        transition_system_order.insert(transition_system_order.end(),
                                       atomic_tso.begin(),
                                       atomic_tso.end());
        transition_system_order.insert(transition_system_order.end(),
                                       product_tso.begin(),
                                       product_tso.end());
    } else {
        transition_system_order.insert(transition_system_order.end(),
                                       product_tso.begin(),
                                       product_tso.end());
        transition_system_order.insert(transition_system_order.end(),
                                       atomic_tso.begin(),
                                       atomic_tso.end());
    }

    // Enumerate all unordered pairs in order of their earlier element.
    merge_candidate_order.reserve(max_transition_system_count *
                                  max_transition_system_count / 2);
    for (size_t i = 0; i < transition_system_order.size(); ++i) {
        for (size_t j = i + 1; j < transition_system_order.size(); ++j) {
            merge_candidate_order.emplace_back(
                transition_system_order[i], transition_system_order[j]);
        }
    }
}
// Human-readable identifier of this scoring function, used in log output.
string MergeScoringFunctionTotalOrder::name() const {
    return "total order";
}
// Log the configuration of this scoring function (orders, seed).
void MergeScoringFunctionTotalOrder::dump_function_specific_options() const {
    // Translate each enum value into the exact label used in the output.
    const char *atomic_order_name = "";
    switch (atomic_ts_order) {
    case AtomicTSOrder::REVERSE_LEVEL:
        atomic_order_name = "reverse level";
        break;
    case AtomicTSOrder::LEVEL:
        atomic_order_name = "level";
        break;
    case AtomicTSOrder::RANDOM:
        atomic_order_name = "random";
        break;
    }
    utils::g_log << "Atomic transition system order: " << atomic_order_name
                 << endl;

    const char *product_order_name = "";
    switch (product_ts_order) {
    case ProductTSOrder::OLD_TO_NEW:
        product_order_name = "old to new";
        break;
    case ProductTSOrder::NEW_TO_OLD:
        product_order_name = "new to old";
        break;
    case ProductTSOrder::RANDOM:
        product_order_name = "random";
        break;
    }
    utils::g_log << "Product transition system order: " << product_order_name
                 << endl;

    utils::g_log << "Consider "
                 << (atomic_before_product ?
                     "atomic before product" : "product before atomic")
                 << " transition systems" << endl;
    utils::g_log << "Random seed: " << random_seed << endl;
}
// Register the command-line options of this scoring function.
// The name and documentation vectors are kept parallel, entry by entry.
void MergeScoringFunctionTotalOrder::add_options_to_parser(
    options::OptionParser &parser) {
    vector<string> atomic_ts_order_names = {
        "reverse_level",
        "level",
        "random",
    };
    vector<string> atomic_ts_order_docs = {
        "the variable order of Fast Downward",
        "opposite of reverse_level",
        "a randomized order",
    };
    parser.add_enum_option<AtomicTSOrder>(
        "atomic_ts_order",
        atomic_ts_order_names,
        "The order in which atomic transition systems are considered when "
        "considering pairs of potential merges.",
        "reverse_level",
        atomic_ts_order_docs);

    vector<string> product_ts_order_names = {
        "old_to_new",
        "new_to_old",
        "random",
    };
    vector<string> product_ts_order_docs = {
        "consider composite transition systems from most recent to oldest, "
        "that is in decreasing index order",
        "opposite of old_to_new",
        "a randomized order",
    };
    parser.add_enum_option<ProductTSOrder>(
        "product_ts_order",
        product_ts_order_names,
        "The order in which product transition systems are considered when "
        "considering pairs of potential merges.",
        "new_to_old",
        product_ts_order_docs);

    parser.add_option<bool>(
        "atomic_before_product",
        "Consider atomic transition systems before composite ones iff true.",
        "false");
    utils::add_rng_options(parser);
}
// Plugin factory: documents the "total_order" scoring function and
// constructs it from the parsed options. Returns nullptr on dry runs.
// Fixes: missing space after the template argument list, and the typo
// "correponds" -> "corresponds" in the user-facing documentation string.
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
    parser.document_synopsis(
        "Total order",
        "This scoring function computes a total order on the merge candidates, "
        "based on the specified options. The score for each merge candidate "
        "corresponds to its position in the order. This scoring function is "
        "mainly intended as tie-breaking, and has been introduced in the "
        "following paper:"
        + utils::format_conference_reference(
            {"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
            "An Analysis of Merge Strategies for Merge-and-Shrink Heuristics",
            "https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf",
            "Proceedings of the 26th International Conference on Automated "
            "Planning and Scheduling (ICAPS 2016)",
            "294-298",
            "AAAI Press",
            "2016") +
        "Furthermore, using the atomic_ts_order option, this scoring function, "
        "if used alone in a score based filtering merge selector, can be used "
        "to emulate the corresponding (precomputed) linear merge strategies "
        "reverse level/level (independently of the other options).");
    MergeScoringFunctionTotalOrder::add_options_to_parser(parser);
    options::Options options = parser.parse();
    if (parser.dry_run())
        return nullptr;
    else
        return make_shared<MergeScoringFunctionTotalOrder>(options);
}

static options::Plugin<MergeScoringFunction> _plugin("total_order", _parse);
}
| 9,300 |
C++
| 38.578723 | 86 | 0.624516 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_sccs.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_SCCS_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_SCCS_H

#include "merge_strategy_factory.h"

namespace options {
class Options;
}

namespace merge_and_shrink {
class MergeTreeFactory;
class MergeSelector;

// Order in which the SCCs are processed by the strategy.
// NOTE(review): DECREASING/INCREASING presumably refer to SCC size --
// confirm against the implementation in the corresponding .cc file.
enum class OrderOfSCCs {
    TOPOLOGICAL,
    REVERSE_TOPOLOGICAL,
    DECREASING,
    INCREASING
};

/*
  Factory for a merge strategy based on strongly connected components
  (SCCs), processed in the configured order. Merges are computed via the
  stored merge tree factory and/or merge selector; see the .cc file for
  how the two interact.
*/
class MergeStrategyFactorySCCs : public MergeStrategyFactory {
    OrderOfSCCs order_of_sccs;
    std::shared_ptr<MergeTreeFactory> merge_tree_factory;
    std::shared_ptr<MergeSelector> merge_selector;
protected:
    virtual std::string name() const override;
    virtual void dump_strategy_specific_options() const override;
public:
    MergeStrategyFactorySCCs(const options::Options &options);
    virtual ~MergeStrategyFactorySCCs() override = default;
    // Computes the concrete merge strategy for the given task and FTS.
    virtual std::unique_ptr<MergeStrategy> compute_merge_strategy(
        const TaskProxy &task_proxy,
        const FactoredTransitionSystem &fts) override;
    virtual bool requires_init_distances() const override;
    virtual bool requires_goal_distances() const override;
};
}

#endif
| 1,111 |
C
| 26.799999 | 66 | 0.756976 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree_factory_linear.cc
|
#include "merge_tree_factory_linear.h"
#include "factored_transition_system.h"
#include "merge_tree.h"
#include "transition_system.h"
#include "../task_proxy.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/markup.h"
#include "../utils/rng_options.h"
#include "../utils/system.h"
#include <algorithm>
using namespace std;
namespace merge_and_shrink {
// Reads the variable order type from the parsed options; all remaining
// options are handled by the MergeTreeFactory base-class constructor.
MergeTreeFactoryLinear::MergeTreeFactoryLinear(const options::Options &options)
    : MergeTreeFactory(options),
      variable_order_type(
          options.get<variable_order_finder::VariableOrderType>("variable_order")) {
}
// Build a left-deep ("linear") merge tree: the first variable of the
// order forms the initial leaf, and every further variable is attached
// as the right child of a new root.
unique_ptr<MergeTree> MergeTreeFactoryLinear::compute_merge_tree(
    const TaskProxy &task_proxy) {
    variable_order_finder::VariableOrderFinder order(
        task_proxy, variable_order_type);
    MergeTreeNode *tree = new MergeTreeNode(order.next());
    while (!order.done()) {
        tree = new MergeTreeNode(tree, new MergeTreeNode(order.next()));
    }
    return utils::make_unique_ptr<MergeTree>(tree, rng, update_option);
}
/*
  Build a linear merge tree restricted to the transition systems listed
  in indices_subset. Variables are still visited in the order given by
  the variable order finder, but only the first unused transition system
  containing each variable contributes a leaf.
*/
unique_ptr<MergeTree> MergeTreeFactoryLinear::compute_merge_tree(
    const TaskProxy &task_proxy,
    const FactoredTransitionSystem &fts,
    const vector<int> &indices_subset) {
    /*
      Compute a mapping from state variables to transition system indices
      that contain those variables. Also set all indices not contained in
      indices_subset to "used".
    */
    int num_vars = task_proxy.get_variables().size();
    int num_ts = fts.get_size();
    vector<int> var_to_ts_index(num_vars, -1);
    // true = "used" (i.e. excluded); entries in indices_subset start as false.
    vector<bool> used_ts_indices(num_ts, true);
    for (int ts_index : fts) {
        bool use_ts_index =
            find(indices_subset.begin(), indices_subset.end(),
                 ts_index) != indices_subset.end();
        if (use_ts_index) {
            used_ts_indices[ts_index] = false;
        }
        // Each variable maps to the (last seen) transition system covering it.
        const vector<int> &vars =
            fts.get_transition_system(ts_index).get_incorporated_variables();
        for (int var : vars) {
            var_to_ts_index[var] = ts_index;
        }
    }
    /*
      Compute the merge tree, using transition systems corresponding to
      variables in order given by the variable order finder, implicitly
      skipping all indices not in indices_subset, because these have been set
      to "used" above.
    */
    variable_order_finder::VariableOrderFinder vof(task_proxy, variable_order_type);
    int next_var = vof.next();
    int ts_index = var_to_ts_index[next_var];
    assert(ts_index != -1);
    // find the first valid ts index
    while (used_ts_indices[ts_index]) {
        assert(!vof.done());
        next_var = vof.next();
        ts_index = var_to_ts_index[next_var];
        assert(ts_index != -1);
    }
    used_ts_indices[ts_index] = true;
    MergeTreeNode *root = new MergeTreeNode(ts_index);
    // Attach each further unused transition system as the right child of
    // a new root, yielding a left-deep tree.
    while (!vof.done()) {
        next_var = vof.next();
        ts_index = var_to_ts_index[next_var];
        assert(ts_index != -1);
        if (!used_ts_indices[ts_index]) {
            used_ts_indices[ts_index] = true;
            MergeTreeNode *right_child = new MergeTreeNode(ts_index);
            root = new MergeTreeNode(root, right_child);
        }
    }
    return utils::make_unique_ptr<MergeTree>(
        root, rng, update_option);
}
// Human-readable identifier of this factory, used in log output.
string MergeTreeFactoryLinear::name() const {
    return "linear";
}
// Log the configured variable order (the only linear-specific option).
void MergeTreeFactoryLinear::dump_tree_specific_options() const {
    dump_variable_order_type(variable_order_type);
}
// Register the linear-specific options on top of the common merge tree
// options. The names must match variable_order_finder::VariableOrderType.
void MergeTreeFactoryLinear::add_options_to_parser(
    options::OptionParser &parser) {
    MergeTreeFactory::add_options_to_parser(parser);
    vector<string> variable_order_names = {
        "CG_GOAL_LEVEL",
        "CG_GOAL_RANDOM",
        "GOAL_CG_LEVEL",
        "RANDOM",
        "LEVEL",
        "REVERSE_LEVEL",
    };
    parser.add_enum_option<variable_order_finder::VariableOrderType>(
        "variable_order", variable_order_names,
        "the order in which atomic transition systems are merged",
        "CG_GOAL_LEVEL");
}
// Plugin factory: documents the "linear" merge tree factory and
// constructs it from the parsed options. Returns nullptr on dry runs.
static shared_ptr<MergeTreeFactory> _parse(options::OptionParser &parser) {
    MergeTreeFactoryLinear::add_options_to_parser(parser);
    parser.document_synopsis(
        "Linear merge trees",
        "These merge trees implement several linear merge orders, which "
        "are described in the paper:" + utils::format_conference_reference(
            {"Malte Helmert", "Patrik Haslum", "Joerg Hoffmann"},
            "Flexible Abstraction Heuristics for Optimal Sequential Planning",
            "https://ai.dmi.unibas.ch/papers/helmert-et-al-icaps2007.pdf",
            "Proceedings of the Seventeenth International Conference on"
            " Automated Planning and Scheduling (ICAPS 2007)",
            "176-183",
            "AAAI Press",
            "2007"));
    options::Options opts = parser.parse();
    if (parser.dry_run())
        return nullptr;
    else
        return make_shared<MergeTreeFactoryLinear>(opts);
}

static options::Plugin<MergeTreeFactory> _plugin("linear", _parse);
}
| 5,237 |
C++
| 34.154362 | 84 | 0.654 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy.cc
|
#include "merge_strategy.h"
using namespace std;
namespace merge_and_shrink {
// Stores the factored transition system that concrete merge strategies
// operate on. NOTE(review): fts appears to be kept as a reference member,
// so the FTS must outlive this strategy object -- confirm in the header.
MergeStrategy::MergeStrategy(
    const FactoredTransitionSystem &fts)
    : fts(fts) {
}
}
| 172 |
C++
| 14.727271 | 40 | 0.726744 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_algorithm.cc
|
#include "merge_and_shrink_algorithm.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "fts_factory.h"
#include "label_reduction.h"
#include "labels.h"
#include "merge_and_shrink_representation.h"
#include "merge_strategy.h"
#include "merge_strategy_factory.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "types.h"
#include "utils.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../task_utils/task_properties.h"
#include "../utils/countdown_timer.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/math.h"
#include "../utils/system.h"
#include "../utils/timer.h"
#include <cassert>
#include <iostream>
#include <string>
#include <utility>
#include <vector>
using namespace std;
using options::Bounds;
using options::OptionParser;
using options::Options;
using utils::ExitCode;
namespace merge_and_shrink {
// Uniform progress line for the merge-and-shrink construction phase.
// The message is taken by const reference to avoid copying a string on
// every call (clang-tidy: performance-unnecessary-value-param).
static void log_progress(const utils::Timer &timer, const string &msg) {
    utils::g_log << "M&S algorithm timer: " << timer << " (" << msg << ")" << endl;
}
// Reads all algorithm options. The size-limit invariants asserted below
// are established beforehand by handle_shrink_limit_options_defaults.
MergeAndShrinkAlgorithm::MergeAndShrinkAlgorithm(const Options &opts) :
    merge_strategy_factory(opts.get<shared_ptr<MergeStrategyFactory>>("merge_strategy")),
    shrink_strategy(opts.get<shared_ptr<ShrinkStrategy>>("shrink_strategy")),
    label_reduction(opts.get<shared_ptr<LabelReduction>>("label_reduction", nullptr)),
    max_states(opts.get<int>("max_states")),
    max_states_before_merge(opts.get<int>("max_states_before_merge")),
    shrink_threshold_before_merge(opts.get<int>("threshold_before_merge")),
    prune_unreachable_states(opts.get<bool>("prune_unreachable_states")),
    prune_irrelevant_states(opts.get<bool>("prune_irrelevant_states")),
    verbosity(opts.get<utils::Verbosity>("verbosity")),
    main_loop_max_time(opts.get<double>("main_loop_max_time")),
    starting_peak_memory(0) {
    assert(max_states_before_merge > 0);
    assert(max_states >= max_states_before_merge);
    assert(shrink_threshold_before_merge <= max_states_before_merge);
}
// Report how much peak memory has grown since the algorithm started
// ("Final" once construction is complete, "Current" otherwise).
void MergeAndShrinkAlgorithm::report_peak_memory_delta(bool final) const {
    utils::g_log << (final ? "Final" : "Current")
                 << " peak memory increase of merge-and-shrink algorithm: "
                 << utils::get_peak_memory_in_kb() - starting_peak_memory
                 << " KB" << endl;
}
// Log the full configuration of the algorithm (only at NORMAL verbosity
// or higher).
void MergeAndShrinkAlgorithm::dump_options() const {
    if (verbosity >= utils::Verbosity::NORMAL) {
        if (merge_strategy_factory) { // deleted after merge strategy extraction
            merge_strategy_factory->dump_options();
            utils::g_log << endl;
        }

        utils::g_log << "Options related to size limits and shrinking: " << endl;
        utils::g_log << "Transition system size limit: " << max_states << endl
                     << "Transition system size limit right before merge: "
                     << max_states_before_merge << endl;
        utils::g_log << "Threshold to trigger shrinking right before merge: "
                     << shrink_threshold_before_merge << endl;
        utils::g_log << endl;

        utils::g_log << "Pruning unreachable states: "
                     << (prune_unreachable_states ? "yes" : "no") << endl;
        utils::g_log << "Pruning irrelevant states: "
                     << (prune_irrelevant_states ? "yes" : "no") << endl;
        utils::g_log << endl;

        // Label reduction is optional; a null pointer means "disabled".
        if (label_reduction) {
            label_reduction->dump_options();
        } else {
            utils::g_log << "Label reduction disabled" << endl;
        }
        utils::g_log << endl;

        utils::g_log << "Main loop max time in seconds: " << main_loop_max_time << endl;
        utils::g_log << endl;
    }
}
// Emit prominent warnings for option combinations that are known to
// perform poorly (disabled label reduction, mismatched label-reduction
// timing for the chosen shrink strategy, disabled pruning).
void MergeAndShrinkAlgorithm::warn_on_unusual_options() const {
    string dashes(79, '=');
    if (!label_reduction) {
        utils::g_log << dashes << endl
                     << "WARNING! You did not enable label reduction.\nThis may "
            "drastically reduce the performance of merge-and-shrink!"
                     << endl << dashes << endl;
    } else if (label_reduction->reduce_before_merging() && label_reduction->reduce_before_shrinking()) {
        utils::g_log << dashes << endl
                     << "WARNING! You set label reduction to be applied twice in each merge-and-shrink\n"
            "iteration, both before shrinking and merging. This double computation effort\n"
            "does not pay off for most configurations!"
                     << endl << dashes << endl;
    } else {
        // Bucket-based shrink strategies pair badly with reduction before
        // shrinking; bisimulation pairs badly with reduction before merging.
        if (label_reduction->reduce_before_shrinking() &&
            (shrink_strategy->get_name() == "f-preserving"
             || shrink_strategy->get_name() == "random")) {
            utils::g_log << dashes << endl
                         << "WARNING! Bucket-based shrink strategies such as f-preserving random perform\n"
                "best if used with label reduction before merging, not before shrinking!"
                         << endl << dashes << endl;
        }
        if (label_reduction->reduce_before_merging() &&
            shrink_strategy->get_name() == "bisimulation") {
            utils::g_log << dashes << endl
                         << "WARNING! Shrinking based on bisimulation performs best if used with label\n"
                "reduction before shrinking, not before merging!"
                         << endl << dashes << endl;
        }
    }

    if (!prune_unreachable_states || !prune_irrelevant_states) {
        utils::g_log << dashes << endl
                     << "WARNING! Pruning is (partially) turned off!\nThis may "
            "drastically reduce the performance of merge-and-shrink!"
                     << endl << dashes << endl;
    }
}
// Return true iff the main-loop time limit has been reached, logging a
// message (at NORMAL verbosity or above) when that happens.
bool MergeAndShrinkAlgorithm::ran_out_of_time(
    const utils::CountdownTimer &timer) const {
    if (!timer.is_expired()) {
        return false;
    }
    if (verbosity >= utils::Verbosity::NORMAL) {
        utils::g_log << "Ran out of time, stopping computation." << endl;
        utils::g_log << endl;
    }
    return true;
}
/*
  The main merge-and-shrink loop: while more than one factor is active,
  repeatedly pick a merge candidate and apply (in this order) label
  reduction, shrinking, label reduction again, merging, and pruning.
  The loop stops early when the time limit expires or an abstract
  problem becomes unsolvable.
*/
void MergeAndShrinkAlgorithm::main_loop(
    FactoredTransitionSystem &fts,
    const TaskProxy &task_proxy) {
    utils::CountdownTimer timer(main_loop_max_time);
    if (verbosity >= utils::Verbosity::NORMAL) {
        utils::g_log << "Starting main loop ";
        if (main_loop_max_time == numeric_limits<double>::infinity()) {
            utils::g_log << "without a time limit." << endl;
        } else {
            utils::g_log << "with a time limit of "
                         << main_loop_max_time << "s." << endl;
        }
    }
    // Track the largest factor ever constructed, for the final statistics.
    int maximum_intermediate_size = 0;
    for (int i = 0; i < fts.get_size(); ++i) {
        int size = fts.get_transition_system(i).get_size();
        if (size > maximum_intermediate_size) {
            maximum_intermediate_size = size;
        }
    }

    if (label_reduction) {
        label_reduction->initialize(task_proxy);
    }
    // The factory is only needed to extract the merge strategy; release it.
    unique_ptr<MergeStrategy> merge_strategy =
        merge_strategy_factory->compute_merge_strategy(task_proxy, fts);
    merge_strategy_factory = nullptr;

    auto log_main_loop_progress = [&timer](const string &msg) {
            utils::g_log << "M&S algorithm main loop timer: "
                         << timer.get_elapsed_time()
                         << " (" << msg << ")" << endl;
        };
    int iteration_counter = 0;
    while (fts.get_num_active_entries() > 1) {
        // Choose next transition systems to merge
        pair<int, int> merge_indices = merge_strategy->get_next();
        if (ran_out_of_time(timer)) {
            break;
        }
        int merge_index1 = merge_indices.first;
        int merge_index2 = merge_indices.second;
        assert(merge_index1 != merge_index2);
        if (verbosity >= utils::Verbosity::NORMAL) {
            utils::g_log << "Next pair of indices: ("
                         << merge_index1 << ", " << merge_index2 << ")" << endl;
            if (verbosity >= utils::Verbosity::VERBOSE) {
                fts.statistics(merge_index1);
                fts.statistics(merge_index2);
            }
            log_main_loop_progress("after computation of next merge");
        }

        // Label reduction (before shrinking)
        if (label_reduction && label_reduction->reduce_before_shrinking()) {
            bool reduced = label_reduction->reduce(merge_indices, fts, verbosity);
            if (verbosity >= utils::Verbosity::NORMAL && reduced) {
                log_main_loop_progress("after label reduction");
            }
        }

        if (ran_out_of_time(timer)) {
            break;
        }

        // Shrinking
        bool shrunk = shrink_before_merge_step(
            fts,
            merge_index1,
            merge_index2,
            max_states,
            max_states_before_merge,
            shrink_threshold_before_merge,
            *shrink_strategy,
            verbosity);
        if (verbosity >= utils::Verbosity::NORMAL && shrunk) {
            log_main_loop_progress("after shrinking");
        }

        if (ran_out_of_time(timer)) {
            break;
        }

        // Label reduction (before merging)
        if (label_reduction && label_reduction->reduce_before_merging()) {
            bool reduced = label_reduction->reduce(merge_indices, fts, verbosity);
            if (verbosity >= utils::Verbosity::NORMAL && reduced) {
                log_main_loop_progress("after label reduction");
            }
        }

        if (ran_out_of_time(timer)) {
            break;
        }

        // Merging
        int merged_index = fts.merge(merge_index1, merge_index2, verbosity);
        int abs_size = fts.get_transition_system(merged_index).get_size();
        if (abs_size > maximum_intermediate_size) {
            maximum_intermediate_size = abs_size;
        }

        if (verbosity >= utils::Verbosity::NORMAL) {
            if (verbosity >= utils::Verbosity::VERBOSE) {
                fts.statistics(merged_index);
            }
            log_main_loop_progress("after merging");
        }

        if (ran_out_of_time(timer)) {
            break;
        }

        // Pruning
        if (prune_unreachable_states || prune_irrelevant_states) {
            bool pruned = prune_step(
                fts,
                merged_index,
                prune_unreachable_states,
                prune_irrelevant_states,
                verbosity);
            if (verbosity >= utils::Verbosity::NORMAL && pruned) {
                if (verbosity >= utils::Verbosity::VERBOSE) {
                    fts.statistics(merged_index);
                }
                log_main_loop_progress("after pruning");
            }
        }

        /*
          NOTE: both the shrink strategy classes and the construction
          of the composite transition system require the input
          transition systems to be non-empty, i.e. the initial state
          not to be pruned/not to be evaluated as infinity.
        */
        if (!fts.is_factor_solvable(merged_index)) {
            if (verbosity >= utils::Verbosity::NORMAL) {
                utils::g_log << "Abstract problem is unsolvable, stopping "
                    "computation. " << endl << endl;
            }
            break;
        }

        if (ran_out_of_time(timer)) {
            break;
        }

        // End-of-iteration output.
        if (verbosity >= utils::Verbosity::VERBOSE) {
            report_peak_memory_delta();
        }
        if (verbosity >= utils::Verbosity::NORMAL) {
            utils::g_log << endl;
        }

        ++iteration_counter;
    }

    utils::g_log << "End of merge-and-shrink algorithm, statistics:" << endl;
    utils::g_log << "Main loop runtime: " << timer.get_elapsed_time() << endl;
    utils::g_log << "Maximum intermediate abstraction size: "
                 << maximum_intermediate_size << endl;
    // The strategies are no longer needed after the loop; release them.
    shrink_strategy = nullptr;
    label_reduction = nullptr;
}
/*
  Entry point of the algorithm: build the atomic factored transition
  system, prune its factors, and (unless a factor is already unsolvable
  or the main loop is disabled via main_loop_max_time == 0) run the main
  merge-and-shrink loop. May only be called once per object.
*/
FactoredTransitionSystem MergeAndShrinkAlgorithm::build_factored_transition_system(
    const TaskProxy &task_proxy) {
    // starting_peak_memory doubles as a "has run" flag (set below).
    if (starting_peak_memory) {
        cerr << "Calling build_factored_transition_system twice is not "
             << "supported!" << endl;
        utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
    }
    starting_peak_memory = utils::get_peak_memory_in_kb();

    utils::Timer timer;
    utils::g_log << "Running merge-and-shrink algorithm..." << endl;
    task_properties::verify_no_axioms(task_proxy);
    dump_options();
    warn_on_unusual_options();
    utils::g_log << endl;

    // Distances are only computed if some component actually needs them.
    const bool compute_init_distances =
        shrink_strategy->requires_init_distances() ||
        merge_strategy_factory->requires_init_distances() ||
        prune_unreachable_states;
    const bool compute_goal_distances =
        shrink_strategy->requires_goal_distances() ||
        merge_strategy_factory->requires_goal_distances() ||
        prune_irrelevant_states;
    FactoredTransitionSystem fts =
        create_factored_transition_system(
            task_proxy,
            compute_init_distances,
            compute_goal_distances,
            verbosity);
    if (verbosity >= utils::Verbosity::NORMAL) {
        log_progress(timer, "after computation of atomic factors");
    }

    /*
      Prune all atomic factors according to the chosen options. Stop early if
      one factor is unsolvable.

      TODO: think about if we can prune already while creating the atomic FTS.
    */
    bool pruned = false;
    bool unsolvable = false;
    for (int index = 0; index < fts.get_size(); ++index) {
        assert(fts.is_active(index));
        if (prune_unreachable_states || prune_irrelevant_states) {
            bool pruned_factor = prune_step(
                fts,
                index,
                prune_unreachable_states,
                prune_irrelevant_states,
                verbosity);
            pruned = pruned || pruned_factor;
        }
        if (!fts.is_factor_solvable(index)) {
            utils::g_log << "Atomic FTS is unsolvable, stopping computation." << endl;
            unsolvable = true;
            break;
        }
    }
    if (verbosity >= utils::Verbosity::NORMAL) {
        if (pruned) {
            log_progress(timer, "after pruning atomic factors");
        }
        utils::g_log << endl;
    }

    if (!unsolvable && main_loop_max_time > 0) {
        main_loop(fts, task_proxy);
    }
    const bool final = true;
    report_peak_memory_delta(final);
    utils::g_log << "Merge-and-shrink algorithm runtime: " << timer << endl;
    utils::g_log << endl;
    return fts;
}
// Register all options of the merge-and-shrink algorithm (strategies,
// label reduction, pruning, size limits, verbosity, time limit).
void add_merge_and_shrink_algorithm_options_to_parser(OptionParser &parser) {
    // Merge strategy option.
    parser.add_option<shared_ptr<MergeStrategyFactory>>(
        "merge_strategy",
        "See detailed documentation for merge strategies. "
        "We currently recommend SCC-DFP, which can be achieved using "
        "{{{merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector="
        "score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order"
        "]))}}}");

    // Shrink strategy option.
    parser.add_option<shared_ptr<ShrinkStrategy>>(
        "shrink_strategy",
        "See detailed documentation for shrink strategies. "
        "We currently recommend non-greedy shrink_bisimulation, which can be "
        "achieved using {{{shrink_strategy=shrink_bisimulation(greedy=false)}}}");

    // Label reduction option (optional; defaults to "none").
    parser.add_option<shared_ptr<LabelReduction>>(
        "label_reduction",
        "See detailed documentation for labels. There is currently only "
        "one 'option' to use label_reduction, which is {{{label_reduction=exact}}} "
        "Also note the interaction with shrink strategies.",
        OptionParser::NONE);

    // Pruning options.
    parser.add_option<bool>(
        "prune_unreachable_states",
        "If true, prune abstract states unreachable from the initial state.",
        "true");
    parser.add_option<bool>(
        "prune_irrelevant_states",
        "If true, prune abstract states from which no goal state can be "
        "reached.",
        "true");

    add_transition_system_size_limit_options_to_parser(parser);

    /*
      silent: no output during construction, only starting and final statistics
      normal: basic output during construction, starting and final statistics
      verbose: full output during construction, starting and final statistics
      debug: full output with additional debug output
    */
    utils::add_verbosity_option_to_parser(parser);

    parser.add_option<double>(
        "main_loop_max_time",
        "A limit in seconds on the runtime of the main loop of the algorithm. "
        "If the limit is exceeded, the algorithm terminates, potentially "
        "returning a factored transition system with several factors. Also "
        "note that the time limit is only checked between transformations "
        "of the main loop, but not during, so it can be exceeded if a "
        "transformation is runtime-intense.",
        "infinity",
        Bounds("0.0", "infinity"));
}
// Register the three size-limit options. All default to -1 ("unset");
// handle_shrink_limit_options_defaults later resolves the defaults.
void add_transition_system_size_limit_options_to_parser(OptionParser &parser) {
    parser.add_option<int>(
        "max_states",
        "maximum transition system size allowed at any time point.",
        "-1",
        Bounds("-1", "infinity"));
    parser.add_option<int>(
        "max_states_before_merge",
        "maximum transition system size allowed for two transition systems "
        "before being merged to form the synchronized product.",
        "-1",
        Bounds("-1", "infinity"));
    parser.add_option<int>(
        "threshold_before_merge",
        "If a transition system, before being merged, surpasses this soft "
        "transition system size limit, the shrink strategy is called to "
        "possibly shrink the transition system.",
        "-1",
        Bounds("-1", "infinity"));
}
// Reconcile the three size-limit options: fill in defaults for unset
// (-1) values, clamp inconsistent combinations (with a warning), and
// reject non-positive limits. Writes the resolved values back to opts.
void handle_shrink_limit_options_defaults(Options &opts) {
    int state_limit = opts.get<int>("max_states");
    int pre_merge_limit = opts.get<int>("max_states_before_merge");
    int shrink_threshold = opts.get<int>("threshold_before_merge");

    // If none of the two state limits has been set: set default limit.
    if (state_limit == -1 && pre_merge_limit == -1) {
        state_limit = 50000;
    }

    // If exactly one of the max_states options has been set, derive the
    // other so that it imposes no further limits.
    if (pre_merge_limit == -1) {
        pre_merge_limit = state_limit;
    } else if (state_limit == -1) {
        int n = pre_merge_limit;
        // Guard the product n * n against integer overflow.
        state_limit = utils::is_product_within_limit(n, n, INF) ? n * n : INF;
    }

    if (pre_merge_limit > state_limit) {
        utils::g_log << "warning: max_states_before_merge exceeds max_states, "
                     << "correcting." << endl;
        pre_merge_limit = state_limit;
    }

    if (state_limit < 1) {
        cerr << "error: transition system size must be at least 1" << endl;
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    }
    if (pre_merge_limit < 1) {
        cerr << "error: transition system size before merge must be at least 1"
             << endl;
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    }

    // An unset threshold defaults to the hard size limit.
    if (shrink_threshold == -1) {
        shrink_threshold = state_limit;
    }
    if (shrink_threshold < 1) {
        cerr << "error: threshold must be at least 1" << endl;
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    }
    if (shrink_threshold > state_limit) {
        utils::g_log << "warning: threshold exceeds max_states, correcting" << endl;
        shrink_threshold = state_limit;
    }

    opts.set<int>("max_states", state_limit);
    opts.set<int>("max_states_before_merge", pre_merge_limit);
    opts.set<int>("threshold_before_merge", shrink_threshold);
}
}
| 20,008 |
C++
| 36.470037 | 107 | 0.588365 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bisimulation.cc
|
#include "shrink_bisimulation.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "label_equivalence_relation.h"
#include "transition_system.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/system.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <iostream>
#include <memory>
#include <unordered_map>
using namespace std;
namespace merge_and_shrink {
/* A successor signature characterizes the behaviour of an abstract
state in so far as bisimulation cares about it. States with
identical successor signature are not distinguished by
bisimulation.
Each entry in the vector is a pair of (label group ID, equivalence class of
successor). The bisimulation algorithm requires that the vector is
sorted and uniquified. */
using SuccessorSignature = vector<pair<int, int>>;
/*
As we use SENTINEL numeric_limits<int>::max() as a sentinel signature and
irrelevant states have a distance of INF = numeric_limits<int>::max(), we
use INF - 1 as the distance value for all irrelevant states. This guarantees
that also irrelevant states are always ordered before the sentinel.
*/
// Sentinel h value that sorts after every real signature (see the
// comment above).
const int SENTINEL = numeric_limits<int>::max();
// Pseudo-distance assigned to states whose goal distance is INF, kept
// strictly below SENTINEL so irrelevant states still precede the sentinel.
const int IRRELEVANT = SENTINEL - 1;
/*
The following class encodes all we need to know about a state for
bisimulation: its h value, which equivalence class ("group") it currently
belongs to, its successor signature (see above), and what the original
state is.
*/
// Per-state bisimulation signature; see the file comment above for the
// role of each field.
struct Signature {
    int h_and_goal; // -1 for goal states; h value for non-goal states
    int group;
    SuccessorSignature succ_signature;
    int state;

    Signature(int h, bool is_goal, int group_,
              const SuccessorSignature &succ_signature_,
              int state_)
        : group(group_), succ_signature(succ_signature_), state(state_) {
        if (is_goal) {
            // Goal states must have goal distance 0 and are encoded as -1
            // so that they sort before all non-goal states.
            assert(h == 0);
            h_and_goal = -1;
        } else {
            h_and_goal = h;
        }
    }

    // Lexicographic comparison on (h_and_goal, group, succ_signature,
    // state); the final state component makes the order total.
    bool operator<(const Signature &other) const {
        if (h_and_goal != other.h_and_goal)
            return h_and_goal < other.h_and_goal;
        if (group != other.group)
            return group < other.group;
        if (succ_signature != other.succ_signature)
            return succ_signature < other.succ_signature;
        return state < other.state;
    }

    // Debug output of all signature components.
    void dump() const {
        utils::g_log << "Signature(h_and_goal = " << h_and_goal
                     << ", group = " << group
                     << ", state = " << state
                     << ", succ_sig = [";
        for (size_t i = 0; i < succ_signature.size(); ++i) {
            if (i)
                utils::g_log << ", ";
            utils::g_log << "(" << succ_signature[i].first
                         << "," << succ_signature[i].second
                         << ")";
        }
        utils::g_log << "])" << endl;
    }
};
// Reads the two bisimulation-specific options: greedy bisimulation
// (skipping non-shortest-path transitions) and the behavior at the
// size limit.
ShrinkBisimulation::ShrinkBisimulation(const Options &opts)
    : greedy(opts.get<bool>("greedy")),
      at_limit(opts.get<AtLimit>("at_limit")) {
}
int ShrinkBisimulation::initialize_groups(
const TransitionSystem &ts,
const Distances &distances,
vector<int> &state_to_group) const {
/* Group 0 holds all goal states.
Each other group holds all states with one particular h value.
Note that some goal state *must* exist because irrelevant and
unreachable states are pruned before we shrink and we never
perform the shrinking if that pruning shows that the problem is
unsolvable.
*/
typedef unordered_map<int, int> GroupMap;
GroupMap h_to_group;
int num_groups = 1; // Group 0 is for goal states.
for (int state = 0; state < ts.get_size(); ++state) {
int h = distances.get_goal_distance(state);
if (h == INF) {
h = IRRELEVANT;
}
if (ts.is_goal_state(state)) {
assert(h == 0);
state_to_group[state] = 0;
} else {
pair<GroupMap::iterator, bool> result = h_to_group.insert(
make_pair(h, num_groups));
state_to_group[state] = result.first->second;
if (result.second) {
// We inserted a new element => a new group was started.
++num_groups;
}
}
}
return num_groups;
}
/*
  Compute one signature per state for the current refinement round.
  The result vector is framed by two artificial sentinel entries
  (h_and_goal == -2 at the front, SENTINEL at the back) and sorted at
  the end, so that states we do not want to distinguish in this round
  end up with equal, adjacent signatures.
*/
void ShrinkBisimulation::compute_signatures(
    const TransitionSystem &ts,
    const Distances &distances,
    vector<Signature> &signatures,
    const vector<int> &state_to_group) const {
    assert(signatures.empty());
    // Step 1: Compute bare state signatures (without transition information).
    signatures.push_back(Signature(-2, false, -1, SuccessorSignature(), -1));
    for (int state = 0; state < ts.get_size(); ++state) {
        int h = distances.get_goal_distance(state);
        if (h == INF) {
            // All unreachable/irrelevant states share one pseudo h value.
            h = IRRELEVANT;
        }
        Signature signature(h, ts.is_goal_state(state),
                            state_to_group[state], SuccessorSignature(),
                            state);
        signatures.push_back(signature);
    }
    signatures.push_back(Signature(SENTINEL, false, -1, SuccessorSignature(), -1));
    // Step 2: Add transition information.
    int label_group_counter = 0;
    /*
      Note that the final result of the bisimulation may depend on the
      order in which transitions are considered below.
      If label groups were sorted (every group by increasing label numbers,
      groups by smallest label number), then the following configuration
      gives a different result on parcprinter-08-strips:p06.pddl:
      astar(merge_and_shrink(
          merge_strategy=merge_stateless(merge_selector=
              score_based_filtering(scoring_functions=[goal_relevance,dfp,
                                                       total_order])),
          shrink_strategy=shrink_bisimulation(greedy=false),
          label_reduction=exact(before_shrinking=true,before_merging=false),
          max_states=50000,threshold_before_merge=1))
      The same behavioral difference can be obtained even without modifying
      the merge-and-shrink code, using the two revisions c66ee00a250a and
      d2e317621f2c. Running the above config, adapted to the old syntax,
      yields the same difference:
      astar(merge_and_shrink(merge_strategy=merge_dfp,
          shrink_strategy=shrink_bisimulation(greedy=false,max_states=50000,
                                              threshold=1),
          label_reduction=exact(before_shrinking=true,before_merging=false)))
    */
    for (GroupAndTransitions gat : ts) {
        const LabelGroup &label_group = gat.label_group;
        const vector<Transition> &transitions = gat.transitions;
        for (const Transition &transition : transitions) {
            // Signature of state s lives at index s + 1 (leading sentinel).
            assert(signatures[transition.src + 1].state == transition.src);
            bool skip_transition = false;
            if (greedy) {
                int src_h = distances.get_goal_distance(transition.src);
                int target_h = distances.get_goal_distance(transition.target);
                if (src_h == INF || target_h == INF) {
                    // We skip transitions connected to an irrelevant state.
                    skip_transition = true;
                } else {
                    int cost = label_group.get_cost();
                    assert(target_h + cost >= src_h);
                    // Greedy bisimulation only considers transitions that
                    // lie on a shortest path to the goal (h-preserving).
                    skip_transition = (target_h + cost != src_h);
                }
            }
            if (!skip_transition) {
                int target_group = state_to_group[transition.target];
                assert(target_group != -1 && target_group != SENTINEL);
                signatures[transition.src + 1].succ_signature.push_back(
                    make_pair(label_group_counter, target_group));
            }
        }
        ++label_group_counter;
    }
    /* Step 3: Canonicalize the representation. The resulting
       signatures must satisfy the following properties:
       1. Signature::operator< defines a total order with the correct
          sentinels at the start and end. The signatures vector is
          sorted according to that order.
       2. Goal states come before non-goal states, and low-h states come
          before high-h states.
       3. States that currently fall into the same group form contiguous
          subsequences.
       4. Two signatures compare equal according to Signature::operator<
          iff we don't want to distinguish their states in the current
          bisimulation round.
    */
    for (size_t i = 0; i < signatures.size(); ++i) {
        SuccessorSignature &succ_sig = signatures[i].succ_signature;
        ::sort(succ_sig.begin(), succ_sig.end());
        // Duplicate (label group, target group) pairs carry no extra
        // distinguishing information; drop them to canonicalize.
        succ_sig.erase(::unique(succ_sig.begin(), succ_sig.end()),
                       succ_sig.end());
    }
    ::sort(signatures.begin(), signatures.end());
}
/*
  Compute a bisimulation-based equivalence relation with at most
  target_size classes. Starting from the initial partition by goal
  distance (see initialize_groups), the partition is iteratively
  refined using sorted state signatures until it is stable or the size
  limit interferes; what happens at the limit depends on at_limit.
*/
StateEquivalenceRelation ShrinkBisimulation::compute_equivalence_relation(
    const TransitionSystem &ts,
    const Distances &distances,
    int target_size) const {
    assert(distances.are_goal_distances_computed());
    int num_states = ts.get_size();
    vector<int> state_to_group(num_states);
    vector<Signature> signatures;
    // One signature per state plus two sentinels (see compute_signatures).
    signatures.reserve(num_states + 2);
    int num_groups = initialize_groups(ts, distances, state_to_group);
    // utils::g_log << "number of initial groups: " << num_groups << endl;
    // TODO: We currently violate this; see issue250
    // assert(num_groups <= target_size);
    bool stable = false;
    bool stop_requested = false;
    while (!stable && !stop_requested && num_groups < target_size) {
        stable = true;
        signatures.clear();
        compute_signatures(ts, distances, signatures, state_to_group);
        // Verify size of signatures and presence of sentinels.
        assert(static_cast<int>(signatures.size()) == num_states + 2);
        assert(signatures[0].h_and_goal == -2);
        assert(signatures[num_states + 1].h_and_goal == SENTINEL);
        int sig_start = 1; // Skip over initial sentinel.
        while (true) {
            int h_and_goal = signatures[sig_start].h_and_goal;
            if (h_and_goal == SENTINEL) {
                // We have hit the end sentinel.
                assert(sig_start + 1 == static_cast<int>(signatures.size()));
                break;
            }
            // Compute the number of groups needed after splitting.
            // [sig_start, sig_end) spans all signatures with the same
            // h/goal value; states of one current group are contiguous
            // within it because the signatures are sorted.
            int num_old_groups = 0;
            int num_new_groups = 0;
            int sig_end;
            for (sig_end = sig_start; true; ++sig_end) {
                if (signatures[sig_end].h_and_goal != h_and_goal) {
                    break;
                }
                const Signature &prev_sig = signatures[sig_end - 1];
                const Signature &curr_sig = signatures[sig_end];
                if (sig_end == sig_start) {
                    assert(prev_sig.group != curr_sig.group);
                }
                if (prev_sig.group != curr_sig.group) {
                    ++num_old_groups;
                    ++num_new_groups;
                } else if (prev_sig.succ_signature != curr_sig.succ_signature) {
                    // Same old group, different successor behavior:
                    // this state opens an additional new group.
                    ++num_new_groups;
                }
            }
            assert(sig_end > sig_start);
            if (at_limit == AtLimit::RETURN &&
                num_groups - num_old_groups + num_new_groups > target_size) {
                /* Can't split the group (or the set of groups for
                   this h value) -- would exceed bound on abstract
                   state number.
                */
                stop_requested = true;
                break;
            } else if (num_new_groups != num_old_groups) {
                // Split into new groups.
                stable = false;
                int new_group_no = -1;
                for (int i = sig_start; i < sig_end; ++i) {
                    const Signature &prev_sig = signatures[i - 1];
                    const Signature &curr_sig = signatures[i];
                    if (prev_sig.group != curr_sig.group) {
                        // Start first group of a block; keep old group no.
                        new_group_no = curr_sig.group;
                    } else if (prev_sig.succ_signature
                               != curr_sig.succ_signature) {
                        new_group_no = num_groups++;
                        assert(num_groups <= target_size);
                    }
                    assert(new_group_no != -1);
                    state_to_group[curr_sig.state] = new_group_no;
                    if (num_groups == target_size)
                        break;
                }
                if (num_groups == target_size)
                    break;
            }
            sig_start = sig_end;
        }
    }
    /* Reduce memory pressure before generating the equivalence
       relation since this is one of the code parts relevant to peak
       memory. */
    utils::release_vector_memory(signatures);
    // Generate final result.
    StateEquivalenceRelation equivalence_relation;
    equivalence_relation.resize(num_groups);
    for (int state = 0; state < num_states; ++state) {
        int group = state_to_group[state];
        if (group != -1) {
            assert(group >= 0 && group < num_groups);
            equivalence_relation[group].push_front(state);
        }
    }
    return equivalence_relation;
}
// Strategy name used when dumping options.
string ShrinkBisimulation::name() const {
    return "bisimulation";
}
// Log the configuration of this shrink strategy.
void ShrinkBisimulation::dump_strategy_specific_options() const {
    utils::g_log << "Bisimulation type: " << (greedy ? "greedy" : "exact") << endl;
    utils::g_log << "At limit: ";
    switch (at_limit) {
    case AtLimit::RETURN:
        utils::g_log << "return";
        break;
    case AtLimit::USE_UP:
        utils::g_log << "use up limit";
        break;
    default:
        ABORT("Unknown setting for at_limit.");
    }
    utils::g_log << endl;
}
/*
  Option parser factory for the "shrink_bisimulation" plugin.
  Returns nullptr in help or dry-run mode; otherwise constructs the
  configured strategy.
  Fixes: typo "Bismulation" in the user-visible synopsis, and missing
  space between the return type and the function name.
*/
static shared_ptr<ShrinkStrategy> _parse(OptionParser &parser) {
    parser.document_synopsis(
        "Bisimulation based shrink strategy",
        "This shrink strategy implements the algorithm described in"
        " the paper:" + utils::format_conference_reference(
            {"Raz Nissim", "Joerg Hoffmann", "Malte Helmert"},
            "Computing Perfect Heuristics in Polynomial Time: On Bisimulation"
            " and Merge-and-Shrink Abstractions in Optimal Planning.",
            "https://ai.dmi.unibas.ch/papers/nissim-et-al-ijcai2011.pdf",
            "Proceedings of the Twenty-Second International Joint Conference"
            " on Artificial Intelligence (IJCAI 2011)",
            "1983-1990",
            "AAAI Press",
            "2011"));
    parser.document_note(
        "shrink_bisimulation(greedy=true)",
        "Combine this with the merge-and-shrink options max_states=infinity "
        "and threshold_before_merge=1 and with the linear merge strategy "
        "reverse_level to obtain the variant 'greedy bisimulation without size "
        "limit', called M&S-gop in the IJCAI 2011 paper. "
        "When we last ran experiments on interaction of shrink strategies "
        "with label reduction, this strategy performed best when used with "
        "label reduction before shrinking (and no label reduction before "
        "merging).");
    parser.document_note(
        "shrink_bisimulation(greedy=false)",
        "Combine this with the merge-and-shrink option max_states=N (where N "
        "is a numerical parameter for which sensible values include 1000, "
        "10000, 50000, 100000 and 200000) and with the linear merge strategy "
        "reverse_level to obtain the variant 'exact bisimulation with a size "
        "limit', called DFP-bop in the IJCAI 2011 paper. "
        "When we last ran experiments on interaction of shrink strategies "
        "with label reduction, this strategy performed best when used with "
        "label reduction before shrinking (and no label reduction before "
        "merging).");
    parser.add_option<bool>("greedy", "use greedy bisimulation", "false");
    vector<string> at_limit;
    at_limit.push_back("RETURN");
    at_limit.push_back("USE_UP");
    parser.add_enum_option<AtLimit>(
        "at_limit", at_limit,
        "what to do when the size limit is hit", "RETURN");
    Options opts = parser.parse();
    if (parser.help_mode())
        return nullptr;
    if (parser.dry_run())
        return nullptr;
    else
        return make_shared<ShrinkBisimulation>(opts);
}
static Plugin<ShrinkStrategy> _plugin("shrink_bisimulation", _parse);
}
| 16,724 |
C++
| 37.360092 | 83 | 0.585326 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bisimulation.h
|
#ifndef MERGE_AND_SHRINK_SHRINK_BISIMULATION_H
#define MERGE_AND_SHRINK_SHRINK_BISIMULATION_H
#include "shrink_strategy.h"
namespace options {
class Options;
}
namespace merge_and_shrink {
struct Signature;
// Policy for what to do when the bisimulation computation hits the size
// limit (see ShrinkBisimulation::compute_equivalence_relation).
enum class AtLimit {
    // Stop refining and return the current relation as soon as a split
    // would exceed the target size.
    RETURN,
    // Keep splitting groups (possibly partially) until the target size
    // is completely used up.
    USE_UP
};
/*
  Shrink strategy based on bisimulation: states are grouped together if
  they have the same goal distance and equivalent successor behavior
  (see shrink_bisimulation.cc for the refinement algorithm).
*/
class ShrinkBisimulation : public ShrinkStrategy {
    // If true, compute greedy bisimulation: only transitions on shortest
    // paths to the goal (h-preserving) are used to distinguish states.
    const bool greedy;
    // Behavior when the size limit is reached during refinement.
    const AtLimit at_limit;
    // NOTE(review): no definition of compute_abstraction is visible in
    // the accompanying .cc chunk -- confirm this declaration is still used.
    void compute_abstraction(
        const TransitionSystem &ts,
        const Distances &distances,
        int target_size,
        StateEquivalenceRelation &equivalence_relation) const;
    // Partition states into initial groups by goal distance (group 0
    // holds all goal states); returns the number of groups.
    int initialize_groups(
        const TransitionSystem &ts,
        const Distances &distances,
        std::vector<int> &state_to_group) const;
    // Compute one sortable signature per state for a refinement round,
    // framed by sentinel entries.
    void compute_signatures(
        const TransitionSystem &ts,
        const Distances &distances,
        std::vector<Signature> &signatures,
        const std::vector<int> &state_to_group) const;
protected:
    virtual void dump_strategy_specific_options() const override;
    virtual std::string name() const override;
public:
    explicit ShrinkBisimulation(const options::Options &opts);
    virtual ~ShrinkBisimulation() override = default;
    // Compute a bisimulation-based equivalence relation with at most
    // target_size classes.
    virtual StateEquivalenceRelation compute_equivalence_relation(
        const TransitionSystem &ts,
        const Distances &distances,
        int target_size) const override;
    // Bisimulation needs goal distances but not init distances.
    virtual bool requires_init_distances() const override {
        return false;
    }
    virtual bool requires_goal_distances() const override {
        return true;
    }
};
}
#endif
| 1,521 |
C
| 24.366666 | 66 | 0.693623 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/labels.cc
|
#include "labels.h"
#include "types.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
/*
  Take ownership of the given labels. max_size is set to 2n - 1 for n
  original labels (0 if there are none) -- presumably because label
  reduction can introduce at most n - 1 fresh labels; confirm against
  the label reduction code.
*/
Labels::Labels(vector<unique_ptr<Label>> &&labels)
    : labels(move(labels)),
      max_size(0) {
    const size_t num_labels = this->labels.size();
    if (num_labels > 0) {
        max_size = 2 * num_labels - 1;
    }
}
void Labels::reduce_labels(const vector<int> &old_label_nos) {
/*
Even though we currently only support exact label reductions where
reduced labels are of equal cost, to support non-exact label reductions,
we compute the cost of the new label as the minimum cost of all old
labels reduced to it to satisfy admissibility.
*/
int new_label_cost = INF;
for (size_t i = 0; i < old_label_nos.size(); ++i) {
int old_label_no = old_label_nos[i];
int cost = get_label_cost(old_label_no);
if (cost < new_label_cost) {
new_label_cost = cost;
}
labels[old_label_no] = nullptr;
}
labels.push_back(utils::make_unique_ptr<Label>(new_label_cost));
}
// A label is "current" (active) iff it has not been reduced away,
// i.e. its slot still holds a Label object.
bool Labels::is_current_label(int label_no) const {
    assert(utils::in_bounds(label_no, labels));
    return static_cast<bool>(labels[label_no]);
}
int Labels::get_label_cost(int label_no) const {
assert(labels[label_no]);
return labels[label_no]->get_cost();
}
void Labels::dump_labels() const {
utils::g_log << "active labels:" << endl;
for (size_t label_no = 0; label_no < labels.size(); ++label_no) {
if (labels[label_no]) {
utils::g_log << "label " << label_no
<< ", cost " << labels[label_no]->get_cost()
<< endl;
}
}
}
}
| 1,773 |
C++
| 27.15873 | 78 | 0.593909 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_precomputed.cc
|
#include "merge_strategy_precomputed.h"
#include "factored_transition_system.h"
#include "merge_tree.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
// Take ownership of the precomputed merge tree that fully determines
// the merge order returned by get_next().
MergeStrategyPrecomputed::MergeStrategyPrecomputed(
    const FactoredTransitionSystem &fts, unique_ptr<MergeTree> merge_tree)
    : MergeStrategy(fts), merge_tree(move(merge_tree)) {
}
// Ask the merge tree for the next pair of factor indices to merge,
// passing the index that the merged product will receive (the current
// size of the factored transition system).
pair<int, int> MergeStrategyPrecomputed::get_next() {
    assert(!merge_tree->done());
    const int new_factor_index = fts.get_size();
    pair<int, int> merge = merge_tree->get_next_merge(new_factor_index);
    assert(fts.is_active(merge.first));
    assert(fts.is_active(merge.second));
    return merge;
}
}
| 690 |
C++
| 26.639999 | 77 | 0.723188 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_sccs.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_SCCS_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_SCCS_H
#include "merge_strategy.h"
#include <memory>
#include <vector>
class TaskProxy;
namespace merge_and_shrink {
class MergeSelector;
class MergeTreeFactory;
class MergeTree;
/*
  Merge strategy operating on the strongly connected components (SCCs)
  of the causal graph; see merge_strategy_sccs.cc for the exact policy.
*/
class MergeStrategySCCs : public MergeStrategy {
    const TaskProxy &task_proxy;
    // Used to decide merges; presumably only one of the factory/selector
    // pair is set at a time -- confirm against the constructor's callers.
    std::shared_ptr<MergeTreeFactory> merge_tree_factory;
    std::shared_ptr<MergeSelector> merge_selector;
    // Remaining non-singleton causal-graph SCCs, each a list of indices.
    std::vector<std::vector<int>> non_singleton_cg_sccs;
    std::vector<int> indices_of_merged_sccs;
    // Active "merge strategies" while merging a set of indices
    std::unique_ptr<MergeTree> current_merge_tree;
    std::vector<int> current_ts_indices;
public:
    MergeStrategySCCs(
        const FactoredTransitionSystem &fts,
        const TaskProxy &task_proxy,
        const std::shared_ptr<MergeTreeFactory> &merge_tree_factory,
        const std::shared_ptr<MergeSelector> &merge_selector,
        std::vector<std::vector<int>> non_singleton_cg_sccs,
        std::vector<int> indices_of_merged_sccs);
    virtual ~MergeStrategySCCs() override;
    virtual std::pair<int, int> get_next() override;
};
}
#endif
| 1,184 |
C
| 29.384615 | 68 | 0.719595 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bucket_based.h
|
#ifndef MERGE_AND_SHRINK_SHRINK_BUCKET_BASED_H
#define MERGE_AND_SHRINK_SHRINK_BUCKET_BASED_H
#include "shrink_strategy.h"
#include <memory>
#include <vector>
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
/* A base class for bucket-based shrink strategies.
A bucket-based strategy partitions the states into an ordered
vector of buckets, from low to high priority, and then abstracts
them to a given target size according to the following rules:
Repeat until we respect the target size:
If any bucket still contains two states:
Combine two random states from the non-singleton bucket
with the lowest priority.
Otherwise:
Combine the two lowest-priority buckets.
For the (usual) case where the target size is larger than the
number of buckets, this works out in such a way that the
high-priority buckets are not abstracted at all, the low-priority
buckets are abstracted by combining all states in each bucket, and
(up to) one bucket "in the middle" is partially abstracted.
*/
class ShrinkBucketBased : public ShrinkStrategy {
protected:
    // A bucket is a set of states, given by their indices.
    using Bucket = std::vector<int>;
    // Used to pick random states when combining (see class comment above).
    std::shared_ptr<utils::RandomNumberGenerator> rng;
private:
    // Turn an ordered bucket partition into an equivalence relation
    // respecting target_size, following the rules in the class comment.
    StateEquivalenceRelation compute_abstraction(
        const std::vector<Bucket> &buckets,
        int target_size) const;
protected:
    // Hook for subclasses: partition the states of ts into buckets,
    // ordered from low to high priority.
    virtual std::vector<Bucket> partition_into_buckets(
        const TransitionSystem &ts,
        const Distances &Distances) const = 0;
public:
    explicit ShrinkBucketBased(const options::Options &opts);
    virtual ~ShrinkBucketBased() override = default;
    virtual StateEquivalenceRelation compute_equivalence_relation(
        const TransitionSystem &ts,
        const Distances &distances,
        int target_size) const override;
    static void add_options_to_parser(options::OptionParser &parser);
};
}
#endif
| 1,974 |
C
| 29.859375 | 69 | 0.72999 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_heuristic.h
|
#ifndef MERGE_AND_SHRINK_MERGE_AND_SHRINK_HEURISTIC_H
#define MERGE_AND_SHRINK_MERGE_AND_SHRINK_HEURISTIC_H
#include "../heuristic.h"
#include <memory>
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
class MergeAndShrinkRepresentation;
/*
  Heuristic that evaluates states via one or more merge-and-shrink
  representations built by the merge-and-shrink algorithm.
*/
class MergeAndShrinkHeuristic : public Heuristic {
    const utils::Verbosity verbosity;
    // The final merge-and-shrink representations, storing goal distances.
    std::vector<std::unique_ptr<MergeAndShrinkRepresentation>> mas_representations;
    // Helpers to move factors out of the factored transition system into
    // mas_representations; exact policies live in the .cc file.
    void extract_factor(FactoredTransitionSystem &fts, int index);
    bool extract_unsolvable_factor(FactoredTransitionSystem &fts);
    void extract_nontrivial_factors(FactoredTransitionSystem &fts);
    void extract_factors(FactoredTransitionSystem &fts);
protected:
    virtual int compute_heuristic(const State &ancestor_state) override;
public:
    explicit MergeAndShrinkHeuristic(const options::Options &opts);
};
}
#endif
| 978 |
C
| 27.794117 | 83 | 0.784254 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.cc
|
#include "merge_scoring_function_miasm_utils.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "utils.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include <algorithm>
using namespace std;
namespace merge_and_shrink {
/*
Compute a state equivalence relation for the given transition system with
the given shrink strategy, respecting the given size limit new_size. If the
result of applying it actually reduced the size of the transition system,
copy the transition system, apply the state equivalence relation to it and
return the result. Return nullptr otherwise.
*/
unique_ptr<TransitionSystem> copy_and_shrink_ts(
    const TransitionSystem &ts,
    const Distances &distances,
    const ShrinkStrategy &shrink_strategy,
    int new_size,
    utils::Verbosity verbosity) {
    /*
      TODO: think about factoring out common logic of this function and the
      function shrink_factor in utils.cc
    */
    StateEquivalenceRelation equivalence_relation =
        shrink_strategy.compute_equivalence_relation(ts, distances, new_size);
    // TODO: We currently violate this; see issue250
    //assert(equivalence_relation.size() <= target_size);
    int new_num_states = equivalence_relation.size();
    if (new_num_states >= ts.get_size()) {
        // Applying the relation would not reduce the size; nothing to do.
        return nullptr;
    }
    /*
      The relation is a genuine reduction: copy the transition system and
      apply the abstraction to the copy.
    */
    vector<int> abstraction_mapping = compute_abstraction_mapping(
        ts.get_size(), equivalence_relation);
    unique_ptr<TransitionSystem> shrunk_copy =
        utils::make_unique_ptr<TransitionSystem>(ts);
    shrunk_copy->apply_abstraction(
        equivalence_relation, abstraction_mapping, verbosity);
    return shrunk_copy;
}
unique_ptr<TransitionSystem> shrink_before_merge_externally(
    const FactoredTransitionSystem &fts,
    int index1,
    int index2,
    const ShrinkStrategy &shrink_strategy,
    int max_states,
    int max_states_before_merge,
    int shrink_threshold_before_merge) {
    const TransitionSystem &ts1 = fts.get_transition_system(index1);
    const TransitionSystem &ts2 = fts.get_transition_system(index2);
    /*
      Determine size limits and if shrinking is necessary or possible as
      done in the merge-and-shrink loop.
    */
    pair<int, int> size_limits = compute_shrink_sizes(
        ts1.get_size(),
        ts2.get_size(),
        max_states_before_merge,
        max_states);
    utils::Verbosity verbosity = utils::Verbosity::SILENT;
    /*
      If a factor needs shrinking, copy_and_shrink_ts takes care of
      computing a copy, shrinking it, and returning it. (In cases where
      shrinking is only triggered because the threshold is passed but no
      perfect shrinking is possible, it returns a null pointer.)
    */
    auto shrink_if_necessary = [&](
        const TransitionSystem &ts, int index, int limit) {
        unique_ptr<TransitionSystem> result = nullptr;
        if (ts.get_size() > min(limit, shrink_threshold_before_merge)) {
            result = copy_and_shrink_ts(
                ts,
                fts.get_distances(index),
                shrink_strategy,
                limit,
                verbosity);
        }
        return result;
    };
    unique_ptr<TransitionSystem> shrunk_ts1 =
        shrink_if_necessary(ts1, index1, size_limits.first);
    unique_ptr<TransitionSystem> shrunk_ts2 =
        shrink_if_necessary(ts2, index2, size_limits.second);
    /*
      Return the product, using either the original transition systems or
      the copied and shrunk ones.
    */
    return TransitionSystem::merge(
        fts.get_labels(),
        (shrunk_ts1 ? *shrunk_ts1 : ts1),
        (shrunk_ts2 ? *shrunk_ts2 : ts2),
        verbosity);
}
}
| 4,014 |
C++
| 33.316239 | 106 | 0.664175 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_selector.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SELECTOR_H
#define MERGE_AND_SHRINK_MERGE_SELECTOR_H
#include <string>
#include <vector>
class TaskProxy;
namespace merge_and_shrink {
class FactoredTransitionSystem;
/*
  Base class for strategies that select the next pair of factors to
  merge directly from the current factored transition system.
*/
class MergeSelector {
protected:
    // Name of the concrete selector, used for logging via dump_options().
    virtual std::string name() const = 0;
    virtual void dump_specific_options() const {}
    // Compute the candidate pairs of factors; if indices_subset is given,
    // candidates are presumably restricted to that subset -- confirm in
    // the .cc implementation.
    std::vector<std::pair<int, int>> compute_merge_candidates(
        const FactoredTransitionSystem &fts,
        const std::vector<int> &indices_subset) const;
public:
    MergeSelector() = default;
    virtual ~MergeSelector() = default;
    // Return the indices of the two factors that should be merged next.
    virtual std::pair<int, int> select_merge(
        const FactoredTransitionSystem &fts,
        const std::vector<int> &indices_subset = std::vector<int>()) const = 0;
    // Give the selector a chance to precompute task-specific information.
    virtual void initialize(const TaskProxy &task_proxy) = 0;
    void dump_options() const;
    // Whether this selector needs init/goal distances to be available.
    virtual bool requires_init_distances() const = 0;
    virtual bool requires_goal_distances() const = 0;
};
}
#endif
| 953 |
C
| 28.812499 | 79 | 0.701994 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.