repo | file | code | file_length | avg_line_length | max_line_length | extension_type
stringlengths 2-99 | stringlengths 13-225 | stringlengths 0-18.3M | int64 0-18.3M | float64 0-1.36M | int64 0-4.26M | stringclasses (1 value)
---|---|---|---|---|---|---
DAAISy | DAAISy-main/dependencies/FD/experiments/issue547/issue547-v2-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from downward.reports.scatter import ScatterPlotReport
import common_setup
from relativescatter import RelativeScatterPlotReport
SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
'astar_ipdb': [
'--search',
'astar(ipdb())'],
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_search_parser("custom-parser.py")
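# The custom parser (custom-parser.py) contributes the "successor_generator_time"
# attribute that is referenced in the comparison table below.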
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)
for conf in CONFIGS:
for attr in ("memory", "search_time"):
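        # One relative scatter plot per (config, attribute) pair, comparing the
        # base and v2 revisions and coloring the points by domain.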
exp.add_report(
RelativeScatterPlotReport(
attributes=[attr],
get_category=lambda run1, run2: run1.get("domain"),
filter_config=["issue547-base-%s" % conf, "issue547-v2-%s" % conf]
),
outfile='issue547_base_v2_%s_%s.png' % (conf, attr)
)
exp()
| 1,104 | 25.309524 | 111 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue547/custom-parser.py | #! /usr/bin/env python
from lab.parser import Parser
class CustomParser(Parser):
def __init__(self):
Parser.__init__(self)
self.add_pattern(
"successor_generator_time",
"Building successor generator...done! \[t=(.+)s\]",
required=False,
type=float)
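        # Example of a log line this pattern matches (time value illustrative):
        #   Building successor generator...done! [t=0.00462s]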
if __name__ == "__main__":
parser = CustomParser()
print "Running custom parser"
parser.parse()
| 430 | 20.55 | 63 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue547/issue547.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs, suites
from downward.reports.scatter import ScatterPlotReport
# Cactus plots are experimental in lab, and require some changes to
# classes in lab, so we cannot add them as external files here.
try:
from downward.reports.cactus import CactusPlotReport
has_cactus_plot = True
except:
has_cactus_plot = False
from lab.experiment import Step
from lab.fetcher import Fetcher
import common_setup
from relativescatter import RelativeScatterPlotReport
SEARCH_REVS = ["issue547-base", "issue547-v1"]
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
'astar_blind': [
'--search',
'astar(blind())'],
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_lmcut': [
'--search',
'astar(lmcut())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
}
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_search_parser("custom-parser.py")
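# The 'refetch' step runs the Fetcher again over the experiment directory with
# the custom parser, so already-fetched results are re-parsed for the new
# attributes.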
exp.add_step(Step('refetch', Fetcher(), exp.path, parsers=['custom-parser.py']))
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)
for conf in CONFIGS:
for attr in ("memory", "search_time"):
exp.add_report(
RelativeScatterPlotReport(
attributes=[attr],
get_category=lambda run1, run2: run1.get("domain"),
filter_config=["issue547-base-%s" % conf, "issue547-v1-%s" % conf]
),
outfile='issue547_base_v1_%s_%s.png' % (conf, attr)
)
if has_cactus_plot:
exp.add_report(CactusPlotReport(attributes=['successor_generator_time'],
filter_config_nick="astar_blind",
ylabel='successor_generator_time',
get_category=lambda run: run['config_nick'],
category_styles={'astar_blind': {'linestyle': '-', 'c':'red'}}
))
exp()
| 2,079 | 28.714286 | 111 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue667/suites.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import textwrap
HELP = "Convert suite name to list of domains or tasks."
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(set(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat()))
def suite_unsolvable():
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl() + suite_ipc14_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt() + suite_ipc14_opt_strips())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl() + suite_ipc14_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat() + suite_ipc14_sat_strips())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_ipc14() + suite_alternative_formulations())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("suite", help="suite name")
return parser.parse_args()
def main():
prefix = "suite_"
suite_names = [
name[len(prefix):] for name in sorted(globals().keys())
if name.startswith(prefix)]
parser = argparse.ArgumentParser(description=HELP)
parser.add_argument("suite", choices=suite_names, help="suite name")
parser.add_argument(
"--width", default=72, type=int,
help="output line width (default: %(default)s). Use 1 for single "
"column.")
args = parser.parse_args()
suite_func = globals()[prefix + args.suite]
print(textwrap.fill(
str(suite_func()),
width=args.width,
break_long_words=False,
break_on_hyphens=False))
if __name__ == "__main__":
main()
| 8,551 | 23.364672 | 77 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue667/ms-parser.py | #! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)
parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int)
parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float)
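# Examples of log lines the patterns above match (numbers are illustrative):
#   Final transition system size: 7
#   Done initializing merge-and-shrink heuristic [12.34s]
#   Final peak memory increase of merge-and-shrink computation: 4096 KB
#   Actual search time: 0.5s [t=13.1s]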
def check_ms_constructed(content, props):
ms_construction_time = props.get('ms_construction_time')
abstraction_constructed = False
if ms_construction_time is not None:
abstraction_constructed = True
props['ms_abstraction_constructed'] = abstraction_constructed
parser.add_function(check_ms_constructed)
def check_planner_exit_reason(content, props):
ms_abstraction_constructed = props.get('ms_abstraction_constructed')
error = props.get('error')
if error != 'none' and error != 'timeout' and error != 'out-of-memory':
print 'error: %s' % error
return
# Check whether merge-and-shrink computation or search ran out of
# time or memory.
ms_out_of_time = False
ms_out_of_memory = False
search_out_of_time = False
search_out_of_memory = False
if ms_abstraction_constructed == False:
if error == 'timeout':
ms_out_of_time = True
elif error == 'out-of-memory':
ms_out_of_memory = True
elif ms_abstraction_constructed == True:
if error == 'timeout':
search_out_of_time = True
elif error == 'out-of-memory':
search_out_of_memory = True
props['ms_out_of_time'] = ms_out_of_time
props['ms_out_of_memory'] = ms_out_of_memory
props['search_out_of_time'] = search_out_of_time
props['search_out_of_memory'] = search_out_of_memory
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
plan_length = props.get('plan_length')
expansions = props.get('expansions')
if plan_length != None:
perfect_heuristic = False
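        # With a perfect heuristic, A* expands exactly the states along an
        # optimal plan: the initial state plus one state per action.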
if plan_length + 1 == expansions:
perfect_heuristic = True
props['perfect_heuristic'] = perfect_heuristic
parser.add_function(check_perfect_heuristic)
def check_proved_unsolvability(content, props):
proved_unsolvability = False
if props['coverage'] == 0:
for line in content.splitlines():
if line == 'Completely explored state space -- no solution!':
proved_unsolvability = True
break
props['proved_unsolvability'] = proved_unsolvability
parser.add_function(check_proved_unsolvability)
parser.parse()
| 2,784 | 37.150685 | 135 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue667/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, benchmarks_dir, suite, revisions=[], configs={},
grid_priority=None, path=None, test_suite=None,
email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(benchmarks_dir, suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, name='', **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
*name* is a custom name for the report.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
if name == '':
name = get_experiment_name()
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
name + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_custom_comparison_table_step(self, name, **kwargs):
"""Add a step that compares the configurations given in
*compared_configs*.
*compared_configs* must be specified. See CompareConfigsReport class.
*name* is a custom name for the report.
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = CompareConfigsReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
name + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-custom-comparison-report',
subprocess.call,
['publish', outfile]))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 13,582 | 34.098191 | 83 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue667/relativescatter.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute differs in
    two configurations. The attribute value in config 1 is shown on the
    x-axis and its relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
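            # x-axis: absolute value in config 1; y-axis: ratio of config 2 to
            # config 1, so y > 1 means config 2 has the larger value.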
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 3,921 | 35.654206 | 84 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue667/v1-v2.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
    print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
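        # Config nick scheme: rl/l/rnd = atomic_ts_order (reverse_level, level,
        # random); otn/nto/rnd = product_ts_order (old_to_new, new_to_old,
        # random); abp/pba = atomic_before_product true/false; b50k =
        # max_states=50000 with threshold_before_merge=1.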
IssueConfig('sccs-top-dfp-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rl-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-l-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('sccs-top-dfp-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['ms_parser'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_absolute_report_step(name='issue667-v1-abp',filter_config=[
'%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v1',
])
exp.add_absolute_report_step(name='issue667-v1-pba',filter_config=[
'%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v1',
])
exp.add_absolute_report_step(name='issue667-v2-abp',filter_config=[
'%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v2',
])
exp.add_absolute_report_step(name='issue667-v2-pba',filter_config=[
'%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v2',
])
exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-abp',compared_configs=[
('%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v2'),
])
exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-pba',compared_configs=[
('%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v2'),
])
exp.add_absolute_report_step(name='issue667-v1-paper',filter_config=[
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1',
'%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v1',
])
exp.add_absolute_report_step(name='issue667-v2-paper',filter_config=[
'%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2',
'%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v2',
])
exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-paper',compared_configs=[
('%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2'),
('%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v1', '%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v2'),
])
#if matplotlib:
#for attribute in ["memory", "total_time"]:
#for config in configs:
#exp.add_report(
#RelativeScatterPlotReport(
#attributes=[attribute],
#filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
#get_category=lambda run1, run2: run1.get("domain"),
#),
#outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
#)
exp()
main(revisions=['issue667-v1', 'issue667-v2'])
| 18,318 | 93.917098 | 481 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue884/parser.py | #! /usr/bin/env python
import logging
import re
from lab.parser import Parser
class CommonParser(Parser):
def add_difference(self, diff, val1, val2):
def diff_func(content, props):
if props.get(val1) is None or props.get(val2) is None:
diff_val = None
else:
diff_val = props.get(val1) - props.get(val2)
props[diff] = diff_val
self.add_function(diff_func)
def _get_flags(self, flags_string):
flags = 0
for char in flags_string:
flags |= getattr(re, char)
return flags
def add_repeated_pattern(
self, name, regex, file="run.log", required=False, type=int,
flags=""):
def find_all_occurences(content, props):
matches = re.findall(regex, content, flags=self._get_flags(flags))
if required and not matches:
logging.error("Pattern {} not found in file {}".format(regex, file))
props[name] = [type(m) for m in matches]
self.add_function(find_all_occurences, file=file)
def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""):
Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags)
def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""):
def search_from_bottom(content, props):
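            # Search the reversed log so that the last occurrence in the
            # original file is the one that gets matched.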
reversed_content = "\n".join(reversed(content.splitlines()))
match = re.search(regex, reversed_content, flags=self._get_flags(flags))
if required and not match:
logging.error("Pattern {} not found in file {}".format(regex, file))
if match:
props[name] = type(match.group(1))
self.add_function(search_from_bottom, file=file)
def no_search(content, props):
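    # If the search never started (no search_start_time was parsed), record the
    # reason by prefixing the original error code.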
if "search_start_time" not in props:
error = props.get("error")
if error is not None and error != "incomplete-search-found-no-plan":
props["error"] = "no-search-due-to-" + error
REFINEMENT_TIMES = [
("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"),
("time_for_finding_flaws", r"Time for finding flaws: (.+)s"),
("time_for_splitting_states", r"Time for splitting states: (.+)s"),
]
REFINEMENT_VALUES = [
("loops", r"Looping transitions: (\d+)\n"),
("transitions", r"Non-looping transitions: (\d+)\n"),
]
def compute_totals(content, props):
for attribute, pattern in REFINEMENT_TIMES + REFINEMENT_VALUES:
props["total_" + attribute] = sum(props[attribute])
def add_time_analysis(content, props):
init_time = props.get("init_time")
if not init_time:
return
parts = []
parts.append("{init_time:.2f}:".format(**props))
for attribute, pattern in REFINEMENT_TIMES:
time = props["total_" + attribute]
relative_time = time / init_time
print time, type(time)
parts.append("{:.2f} ({:.2f})".format(time, relative_time))
props["time_analysis"] = " ".join(parts)
def main():
parser = CommonParser()
parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float)
parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int)
parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float)
parser.add_pattern("cartesian_states", r"Cartesian states: (\d+)\n", type=int)
parser.add_pattern("loops", r"Looping transitions: (\d+)\n", type=int)
parser.add_pattern("state_changing_transitions", r"Non-looping transitions: (\d+)\n", type=int)
for attribute, pattern in REFINEMENT_TIMES:
parser.add_repeated_pattern(attribute, pattern, type=float, required=False)
for attribute, pattern in REFINEMENT_VALUES:
parser.add_repeated_pattern(attribute, pattern, type=int, required=False)
parser.add_function(no_search)
parser.add_function(compute_totals)
parser.add_function(add_time_analysis)
parser.parse()
if __name__ == "__main__":
main()
| 4,165 | 35.54386 | 109 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue884/v4.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue884-v2", "issue884-v2-heap-queue", "issue884-v4"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick + "-" + max_transitions_nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for max_transitions_nick, max_transitions in [("2M", 2000000)]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
#"depot:p02.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
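# The per-refinement lists parsed by parser.py are summed into "total_*"
# attributes by its compute_totals() function.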
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
"search_start_memory", "init_time", "time_analysis", "total_loops",
"total_transitions"] +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
for revisions in itertools.combinations(REVISIONS, 2):
for attribute in ["total_time_for_finding_traces"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *revisions))
exp.run_steps()
| 2,672 | 31.597561 | 134 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue884/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        # Both may be omitted (see docstring); then no algorithms are added here.
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
        For portfolios, all attributes not listed in
        PORTFOLIO_ATTRIBUTES will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,786 | 36.435443 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue884/v3.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue884-base", "issue884-v1", "issue884-v2", "issue884-v3"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick + "-" + max_transitions_nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for max_transitions_nick, max_transitions in [("2M", 2000000)]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]),
("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
#"depot:p02.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
"search_start_memory", "init_time", "time_analysis", "total_loops",
"total_transitions"] +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "total_time_for_finding_traces"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,762 | 32.695122 | 134 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue884/relativescatter.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
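        # For example, ylim_top = 32 yields tick_step = 2 and ticks at
        # 2**-5, ..., 2**5, i.e. 1/32, ..., 32 (numbers only for illustration).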
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if not val1 or not val2:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
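            # For example, val1 = 10 and val2 = 5 give the point (10, 0.5):
            # algorithm 2 needs half the value of algorithm 1 on this task.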
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,867 | 35.490566 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue468/issue468.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_lmcount_lm_merged_rhw_hm': [
'--search',
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue468-base", "issue468-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 462 | 19.130435 | 85 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue468/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (node.endswith("cluster") or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
# TODO: Once we have reference results, we should add "quality".
# TODO: Add something about errors/exit codes.
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" % (rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for attribute in valid_attributes:
name = "-".join([rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,734 | 35.594828 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue747/v1.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue747-base", "issue747-v1"]
CONFIGS = [
IssueConfig('lazy-greedy-blind', ['--search', 'lazy_greedy([blind()])']),
IssueConfig('lama-first', [], driver_options=["--alias", "lama-first"]),
IssueConfig('lwastar-ff', ["--heuristic", "h=ff()", "--search", "lazy_wastar([h],preferred=[h],w=5)"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attribute in ["total_time"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
)
exp.run_steps()
| 1,587 | 32.083333 | 106 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue747/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pipesworld-notankage',
'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
'tetris-opt14-strips', 'tidybot-opt11-strips',
'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
'transport-opt11-strips', 'transport-opt14-strips',
'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        # Both may be omitted (see docstring); then no algorithms are added here.
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
        For portfolios, all attributes not listed in
        PORTFOLIO_ATTRIBUTES will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,171 | 35.715026 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue747/relativescatter.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
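        # For example, ylim_top = 32 yields tick_step = 2 and ticks at
        # 2**-5, ..., 2**5, i.e. 1/32, ..., 32 (numbers only for illustration).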
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
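            # For example, val1 = 10 and val2 = 5 give the point (10, 0.5):
            # algorithm 2 needs half the value of algorithm 1 on this task.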
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/v1-lama-first-ignore-pref.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-v1"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("lama-first-pref-{pref}".format(**locals()), [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm],"
"cost_type=one,reopen_closed=false)"])
for pref in ["none", "simple"]
] + [
("lama-first-pref-{pref}-ignore".format(**locals()), [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", "lazy_greedy([hff,hlm],preferred=[hff],"
"cost_type=one,reopen_closed=false)"])
for pref in ["simple"]
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 2,448 | 30.805195 | 140 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/v2-lama-first.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-base", "issue846-v2"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("lama-first-pref-{pref}".format(**locals()), [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref={pref})".format(**locals()),
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm],"
"cost_type=one,reopen_closed=false)"])
for pref in ["true", "false"]
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_comparison_table_step()
exp.run_steps()
| 1,943 | 28.014925 | 125 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/v1-bjolp.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-v1"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("bjolp-pref-{pref}".format(**locals()), [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, preferred_operators={pref})".format(**locals()),
"--search",
"astar(lmc,lazy_evaluator=lmc)"])
for pref in ["none", "simple"]
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 1,834 | 25.985294 | 119 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/v1-lama-first-no-ff.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-v1"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("{index}-lama-first-no-ff-pref-{pref}".format(**locals()), [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()),
"--search", "lazy_greedy([hlm],preferred=[hlm],"
"cost_type=one,reopen_closed=false)"])
for index, pref in enumerate(["none", "simple", "all"])
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 1,969 | 27.970588 | 140 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/v2-bjolp.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-base", "issue846-v2"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("bjolp-pref-{pref}".format(**locals()), [
"--evaluator",
"lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, pref={pref})".format(**locals()),
"--search",
"astar(lmc,lazy_evaluator=lmc)"])
for pref in ["false"]
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,802 | 25.910448 | 104 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
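# Minimal usage sketch (illustrative only; it mirrors the experiment scripts
# that import this module). The revisions, config and benchmark names below
# are placeholders, and LocalEnvironment comes from lab.environments.
#   exp = IssueExperiment(
#       revisions=["issue123-base", "issue123-v1"],
#       configs=[IssueConfig("lmcut", ["--search", "astar(lmcut())"])],
#       environment=LocalEnvironment(processes=1))
#   exp.add_suite(BENCHMARKS_DIR, ["gripper:prob01.pddl"])
#   exp.add_comparison_table_step()
#   exp.add_scatter_plot_step(relative=True)
#   exp.run_steps()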
| 14,786 | 36.435443 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/v1-lama-first.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-v1"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("lama-first-pref-{pref}".format(**locals()), [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm],"
"cost_type=one,reopen_closed=false)"])
for pref in ["none", "simple", "all"]
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
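# With the single revision and build above, this comprehension yields three
# configurations (pref = none/simple/all); IssueExperiment prefixes each nick
# with the revision, producing the algorithm names filtered in the report
# step below, e.g. "issue846-v1-lama-first-pref-none".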
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(filter_algorithm=["issue846-v1-lama-first-pref-none", "issue846-v1-lama-first-pref-simple", "issue846-v1-lama-first-pref-all"])
#exp.add_comparison_table_step()
exp.run_steps()
| 2,132 | 29.913043 | 156 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/v1-lama.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue846-v1"]
BUILDS = ["release32"]
CONFIG_NICKS = [
("lama-no-syn-pref-{pref}".format(**locals()), [
"--if-unit-cost",
"--evaluator",
"hlm=lmcount(lm_rhw(reasonable_orders=true), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff=ff()",
"--search", """iterated([
lazy_greedy([hff,hlm],preferred=[hff,hlm]),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2),
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--if-non-unit-cost",
"--evaluator",
"hlm1=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff1=ff(transform=adapt_costs(one))",
"--evaluator",
"hlm2=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(plusone), preferred_operators={pref})".format(**locals()),
"--evaluator", "hff2=ff(transform=adapt_costs(plusone))",
"--search", """iterated([
lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],
cost_type=one,reopen_closed=false),
lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],
reopen_closed=false),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2),
lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)
],repeat_last=true,continue_on_fail=true)""",
"--always"])
for pref in ["none", "simple", "all"]
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
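# As in v1-lama-first.py, this expands to the three algorithms
# "issue846-v1-lama-no-syn-pref-{none,simple,all}" listed in the
# filter_algorithm argument of the absolute report step below.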
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(filter_algorithm=["issue846-v1-lama-no-syn-pref-none", "issue846-v1-lama-no-syn-pref-simple", "issue846-v1-lama-no-syn-pref-all"])
#exp.add_comparison_table_step()
exp.run_steps()
| 3,606 | 37.784946 | 159 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue846/relativescatter.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
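# Illustrative sketch (not part of the original file) of how this report is
# typically wired up, cf. IssueExperiment.add_scatter_plot_step in
# common_setup.py. The algorithm names and attribute are placeholders, and
# depending on the lab version the filter keyword is filter_config or
# filter_algorithm.
#   report = RelativeScatterPlotReport(
#       filter_algorithm=["issue846-base-lama-first", "issue846-v1-lama-first"],
#       attributes=["total_time"],
#       get_category=lambda run1, run2: run1["domain"])
#   report("path/to/eval-dir", "path/to/total_time.png")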
| 3,875 | 35.566038 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue899/v1-opt.py | #! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue899-base", "issue899-v1"]
CONFIGS = [
IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["search_time", "total_time"])
exp.run_steps()
| 1,444 | 26.788462 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue899/v1-sat.py | #! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue899-base", "issue899-v1"]
CONFIGS = [
IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]),
IssueConfig("lm_hm", [
"--landmarks", "lm=lm_hm(2)",
"--heuristic", "hlm=lmcount(lm)",
"--search", "lazy_greedy([hlm])"]),
IssueConfig("lm_exhaust", [
"--landmarks", "lm=lm_exhaust()",
"--heuristic", "hlm=lmcount(lm)",
"--search", "lazy_greedy([hlm])"]),
IssueConfig("lm_rhw", [
"--landmarks", "lm=lm_rhw()",
"--heuristic", "hlm=lmcount(lm)",
"--search", "lazy_greedy([hlm])"]),
IssueConfig("lm_zg", [
"--landmarks", "lm=lm_zg()",
"--heuristic", "hlm=lmcount(lm)",
"--search", "lazy_greedy([hlm])"]),
]
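# Apart from lama-first, each configuration builds one landmark factory
# (h^m with m=2, exhaustive, RHW, Zhu/Givan), counts its landmarks via
# lmcount() and feeds that heuristic to lazy greedy search; the comparison
# table step below then pairs issue899-base against issue899-v1 for each.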
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,972 | 28.447761 | 76 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue899/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print("Make scatter plot for", name)
algo1 = get_algo_nick(rev1, config_nick)
algo2 = get_algo_nick(rev2, config_nick)
report = report_class(
filter_algorithm=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"])
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,744 | 36.423858 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue899/relativescatter.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue743/v2.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue743-v2"]
CONFIGS = [
IssueConfig(
'ipdb-goal-vars-{goal_vars}'.format(**locals()),
['--search', 'astar(ipdb(max_time=900, use_co_effect_goal_variables={goal_vars}))'.format(**locals())])
for goal_vars in [False, True]
]
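# Two algorithms per revision, "ipdb-goal-vars-False" and
# "ipdb-goal-vars-True"; note that the ipdb() flag here is spelled
# use_co_effect_goal_variables, while v1.py uses consider_co_effect_vars.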
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 1,135 | 27.4 | 111 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue743/v1.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue743-v1"]
CONFIGS = [
IssueConfig(
'ipdb-goal-vars-{goal_vars}'.format(**locals()),
['--search', 'astar(ipdb(max_time=900, consider_co_effect_vars={goal_vars}))'.format(**locals())])
for goal_vars in [False, True]
]
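# This yields exactly two algorithms, "issue743-v1-ipdb-goal-vars-False" and
# "issue743-v1-ipdb-goal-vars-True", differing only in the
# consider_co_effect_vars flag passed to ipdb().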
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 1,130 | 27.275 | 106 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue743/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pipesworld-notankage',
'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
'tetris-opt14-strips', 'tidybot-opt11-strips',
'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
'transport-opt11-strips', 'transport-opt14-strips',
'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_running_on_cluster_login_node():
return platform.node() == "login20.cluster.bc2.ch"
def can_publish():
return is_running_on_cluster_login_node() or not is_running_on_cluster()
def publish(report_file):
if can_publish():
subprocess.call(["publish", report_file])
else:
print "publishing reports is not supported on this node"
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, name="make-absolute-report", outfile=outfile)
self.add_step("publish-absolute-report", publish, outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def get_revision_pairs_and_files():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
yield (rev1, rev2, outfile)
def make_comparison_tables():
for rev1, rev2, outfile in get_revision_pairs_and_files():
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for _, _, outfile in get_revision_pairs_and_files():
publish(outfile)
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step("publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,462 | 35.24812 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue743/v3.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue743-v2", "issue743-v3"]
CONFIGS = [
IssueConfig('ipdb-900s', ['--search', 'astar(ipdb(max_time=900))'])
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 1,000 | 26.054054 | 71 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue743/relativescatter.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue743/v2-vs-base.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = []
CONFIGS = []
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_algorithm(
"base-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-base",
['--search', 'astar(ipdb(max_time=900))'])
exp.add_algorithm(
"v2-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-v2",
['--search', 'astar(ipdb(max_time=900, use_co_effect_goal_variables=false))'])
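# The two algorithms are added manually rather than via revisions/configs,
# presumably because the base revision predates the
# use_co_effect_goal_variables option; both otherwise run the same
# 900-second ipdb configuration.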
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
exp.run_steps()
| 1,218 | 28.731707 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue511/sat-v1.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue511-base", "issue511-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"eager_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"],
"lazy_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 650 | 18.147059 | 45 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue511/opt-v1.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue511-base", "issue511-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"astar_blind": ["--search", "astar(blind())"],
"astar_hmax": ["--search", "astar(hmax())"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 492 | 17.961538 | 50 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue511/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-%s-compare.html" %
(self.name, rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in valid_attributes:
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,755 | 35.135977 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/v2-sat.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue700-v2-base", "issue700-v2"]
CONFIGS = [
IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]),
IssueConfig(
"lama-first",
[],
driver_options=["--alias", "lama-first"]),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("v2-base", "v2")]:
for config_nick in ["ehc_ff", "lama-first"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue700-%s-%s" % (rev1, config_nick),
"issue700-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,675 | 29.472727 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/v2-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue700-v2-base", "issue700-v2"]
CONFIGS = [
IssueConfig("h2", ["--search", "astar(hm(2))"]),
IssueConfig("ipdb", ["--search", "astar(ipdb())"]),
IssueConfig("blind", ["--search", "astar(blind())"]),
IssueConfig("lmcut", ["--search", "astar(lmcut())"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("v2-base", "v2")]:
for config_nick in ["h2", "ipdb", "blind", "lmcut"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue700-%s-%s" % (rev1, config_nick),
"issue700-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,710 | 31.283019 | 81 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/v1-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue700-base", "issue700-v1"]
CONFIGS = [
IssueConfig("blind", ["--search", "astar(blind())"]),
IssueConfig("lmcut", ["--search", "astar(lmcut())"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("base", "v1")]:
for config_nick in ["blind", "lmcut"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue700-%s-%s" % (rev1, config_nick),
"issue700-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,581 | 30.019608 | 81 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/v1-sat.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue700-base", "issue700-v1"]
CONFIGS = [
IssueConfig(
"lama-first",
[],
driver_options=["--alias", "lama-first"]),
IssueConfig(
"lama",
[],
driver_options=["--alias", "seq-sat-lama-2011"]),
IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step(filter_algorithm=["lama-first"])
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("base", "v1")]:
for config_nick in ["lama-first", "ehc_ff"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue700-%s-%s" % (rev1, config_nick),
"issue700-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,803 | 29.576271 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pipesworld-notankage',
'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
'tetris-opt14-strips', 'tidybot-opt11-strips',
'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
'transport-opt11-strips', 'transport-opt14-strips',
'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
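# Usage sketch (illustrative only; the two configurations mirror the experiment
# scripts in this directory rather than anything defined in this module):
#
#     IssueConfig("blind", ["--search", "astar(blind())"])
#     IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"])
#
# The first passes plain component options, the second only driver options.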
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        # Iterate over nothing (not None) when both arguments were omitted.
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
        configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,171 | 35.715026 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/v1-opt2.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue700-base", "issue700-v1"]
CONFIGS = [
IssueConfig("h2", ["--search", "astar(hm(2))"]),
IssueConfig("ipdb", ["--search", "astar(ipdb())"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("base", "v1")]:
for config_nick in ["h2", "ipdb"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue700-%s-%s" % (rev1, config_nick),
"issue700-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,570 | 29.803922 | 81 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/v1-sat2.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue700-base", "issue700-v1"]
CONFIGS = [
IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("base", "v1")]:
for config_nick in ["ehc_ff"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue700-%s-%s" % (rev1, config_nick),
"issue700-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,553 | 29.470588 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/relativescatter.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue700/v3-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue700-v2", "issue700-v3"]
CONFIGS = [
IssueConfig("h2", ["--search", "astar(hm(2))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attr in ["total_time", "search_time", "memory"]:
for rev1, rev2 in [("v2", "v3")]:
for config_nick in ["h2"]:
exp.add_report(RelativeScatterPlotReport(
attributes=[attr],
filter_algorithm=["issue700-%s-%s" % (rev1, config_nick),
"issue700-%s-%s" % (rev2, config_nick)],
get_category=lambda r1, r2: r1["domain"],
),
outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))
exp.run_steps()
| 1,502 | 29.06 | 81 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v1-lama-sat.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v1"]
SUITE = suites.suite_satisficing()
CONFIGS = [
IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]),
IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 552 | 19.481481 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v3-lama-sat.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v3"]
SUITE = suites.suite_satisficing()
CONFIGS = [
IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]),
IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 552 | 19.481481 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v3-lama-opt2.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v3"]
SUITE = suites.suite_optimal_strips()
CONFIGS = [
IssueConfig("lm_zg", [
"--landmarks",
"lm=lm_zg()",
"--heuristic",
"hlm=lmcount(lm)",
"--search",
"astar(hlm)"]),
IssueConfig("lm_exhaust", [
"--landmarks",
"lm=lm_exhaust()",
"--heuristic",
"hlm=lmcount(lm)",
"--search",
"astar(hlm)"]),
IssueConfig("lm_hm", [
"--landmarks",
"lm=lm_hm(2)",
"--heuristic",
"hlm=lmcount(lm)",
"--search",
"astar(hlm)"]),
IssueConfig("lm_hm_max", [
"--landmarks",
"lm=lm_hm(2)",
"--heuristic",
"h1=lmcount(lm,admissible=true)",
"--heuristic",
"h2=lmcount(lm,admissible=false)",
"--search",
"astar(max([h1,h2]))"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 1,157 | 20.054545 | 53 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v2-lama-sat.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v2"]
SUITE = suites.suite_satisficing()
CONFIGS = [
IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]),
IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 552 | 19.481481 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v4-lama-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v4"]
SUITE = suites.suite_optimal_strips()
CONFIGS = [
IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
IssueConfig("lm_zg", [
"--landmarks",
"lm=lm_zg()",
"--heuristic",
"hlm=lmcount(lm)",
"--search",
"astar(hlm)"]),
IssueConfig("lm_exhaust", [
"--landmarks",
"lm=lm_exhaust()",
"--heuristic",
"hlm=lmcount(lm)",
"--search",
"astar(hlm)"]),
IssueConfig("lm_hm", [
"--landmarks",
"lm=lm_hm(2)",
"--heuristic",
"hlm=lmcount(lm)",
"--search",
"astar(hlm)"]),
IssueConfig("lm_hm_max", [
"--landmarks",
"lm=lm_hm(2)",
"--heuristic",
"h1=lmcount(lm,admissible=true)",
"--heuristic",
"h2=lmcount(lm,admissible=false)",
"--search",
"astar(max([h1,h2]))"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 1,240 | 21.160714 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v3-lama-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v3"]
SUITE = suites.suite_optimal_strips()
CONFIGS = [
IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 470 | 17.115385 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Wrapper for FastDownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions, configs, suite, grid_priority=None,
path=None, test_suite=None, email=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(os.path.join(repo, "benchmarks"), suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append((
"{rev1}-{config_nick}".format(**locals()),
"{rev2}-{config_nick}".format(**locals()),
"Diff ({config_nick})".format(**locals())))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"{name}-{rev1}-{rev2}-compare.html".format(
name=self.name, **locals()))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
        configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 11,842 | 33.628655 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v4-lama-sat.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v4"]
SUITE = suites.suite_satisficing()
CONFIGS = [
IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]),
IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 552 | 19.481481 | 90 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v1-lama-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v1"]
SUITE = suites.suite_optimal_strips()
CONFIGS = [
IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 470 | 17.115385 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue592/v2-lama-opt.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup import IssueConfig, IssueExperiment
REVS = ["issue592-base", "issue592-v2"]
SUITE = suites.suite_optimal_strips()
CONFIGS = [
IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
]
exp = IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
email="[email protected]"
)
exp.add_comparison_table_step()
exp()
| 470 | 17.115385 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue420/issue420-v1.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward.suites import suite_optimal_with_ipc11
import common_setup
REVS = ["issue420-base", "issue420-v1"]
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"lmcut": ["--search", "astar(lmcut())"],
}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = suite_optimal_with_ipc11()
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES
)
exp()
| 718 | 18.432432 | 65 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue420/common_setup.py | # -*- coding: utf-8 -*-
import os.path
from lab.environments import MaiaEnvironment
from lab.steps import Step
from downward.checkouts import Translator, Preprocessor, Planner
from downward.experiments import DownwardExperiment
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
    Derived from the absolute filename of the main script, e.g.
    "/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Found by searching upwards in the directory tree from the main
script until a directory with a subdirectory named ".hg" is found."""
path = os.path.abspath(get_script_dir())
    while True:
        if os.path.exists(os.path.join(path, ".hg")):
            return path
        if os.path.dirname(path) == path:
            # Reached the filesystem root without finding the repository.
            raise RuntimeError("repo base could not be found")
        path = os.path.dirname(path)
class MyExperiment(DownwardExperiment):
    """Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"total_time",
"search_time",
"memory",
"expansions_until_last_jump",
]
"""Wrapper for DownwardExperiment with a few convenience features."""
def __init__(self, configs=None, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
suite=None, parsers=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
If "configs" is specified, it should be a dict of {nick:
cmdline} pairs that sets the planner configurations to test.
If "grid_priority" is specified and no environment is
specifically requested in **kwargs, use the maia environment
with the specified priority.
If "path" is not specified, the experiment data path is
derived automatically from the main script's filename.
If "repo" is not specified, the repository base is derived
automatically from the main script's path.
If "revisions" is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search.
If "search_revisions" is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All experiments use the
translator and preprocessor component of the first
revision.
If "suite" is specified, it should specify a problem suite.
If "parsers" is specified, it should be a list of paths to
parsers that should be run in addition to search_parser.py.
Options "combinations" (from the base class), "revisions" and
"search_revisions" are mutually exclusive."""
if grid_priority is not None and "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
num_rev_opts_specified = (
int(revisions is not None) +
int(search_revisions is not None) +
int(kwargs.get("combinations") is not None))
if num_rev_opts_specified > 1:
raise ValueError('must specify exactly one of "revisions", '
'"search_revisions" or "combinations"')
# See add_comparison_table_step for more on this variable.
self._HACK_revisions = revisions
if revisions is not None:
if not revisions:
raise ValueError("revisions cannot be empty")
combinations = [(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions]
kwargs["combinations"] = combinations
if search_revisions is not None:
if not search_revisions:
raise ValueError("search_revisions cannot be empty")
base_rev = search_revisions[0]
translator = Translator(repo, base_rev)
preprocessor = Preprocessor(repo, base_rev)
combinations = [(translator, preprocessor, Planner(repo, rev))
for rev in search_revisions]
kwargs["combinations"] = combinations
self._additional_parsers = parsers or []
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
if configs is not None:
for nick, config in configs.items():
self.add_config(nick, config)
if suite is not None:
self.add_suite(suite)
self._report_prefix = get_experiment_name()
def _make_search_runs(self):
DownwardExperiment._make_search_runs(self)
for i, parser in enumerate(self._additional_parsers):
parser_alias = 'ADDITIONALPARSER%d' % i
self.add_resource(parser_alias, parser, os.path.basename(parser))
for run in self.runs:
run.require_resource(parser_alias)
run.add_command('additional-parser-%d' % i, [parser_alias])
def add_comparison_table_step(self, attributes=None):
revisions = self._HACK_revisions
if revisions is None:
# TODO: It's not clear to me what a "revision" in the
# overall context of the code really is, e.g. when keeping
# the translator and preprocessor method fixed and only
# changing the search component. It's also not really
# clear to me how the interface of the Compare... reports
# works and how to use it more generally. Hence the
# present hack.
# Ideally, this method should look at the table columns we
# have (defined by planners and planner configurations),
# pair them up in a suitable way, either controlled by a
# convenience parameter or a more general grouping method,
# and then use this to define which pairs go together.
raise NotImplementedError(
"only supported when specifying revisions in __init__")
if attributes is None:
attributes = self.DEFAULT_TABLE_ATTRIBUTES
report = CompareRevisionsReport(*revisions, attributes=attributes)
self.add_report(report, outfile="%s-compare.html" % self._report_prefix)
def add_scatter_plot_step(self, attributes=None):
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
revisions = self._HACK_revisions
if revisions is None:
# TODO: See add_comparison_table_step.
raise NotImplementedError(
"only supported when specifying revisions in __init__")
if len(revisions) != 2:
# TODO: Should generalize this, too, by offering a general
# grouping function and then comparing any pair of
# settings in the same group.
raise NotImplementedError("need two revisions")
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plots():
configs = [conf[0] for conf in self.configs]
for nick in configs:
config_before = "%s-%s" % (revisions[0], nick)
config_after = "%s-%s" % (revisions[1], nick)
for attribute in attributes:
name = "%s-%s-%s" % (self._report_prefix, attribute, nick)
report = ScatterPlotReport(
filter_config=[config_before, config_after],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 9,190 | 37.456067 | 80 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue420/issue420-v1-regressions.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Before you can run the experiment you need to create duplicates of the
two tasks we want to test:
cd ../benchmarks/tidybot-opt11-strips
for i in {00..49}; do cp p14.pddl p14-$i.pddl; done
cd ../parking-opt11-strips
for i in {00..49}; do cp pfile04-015.pddl pfile04-015-$i.pddl; done
Don't forget to remove the duplicate tasks afterwards. Otherwise they
will be included in subsequent experiments.
"""
import common_setup
REVS = ["issue420-base", "issue420-v1"]
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"lmcut": ["--search", "astar(lmcut())"],
}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = (["tidybot-opt11-strips:p14-%02d.pddl" % i for i in range(50)] +
["parking-opt11-strips:pfile04-015-%02d.pddl" % i for i in range(50)])
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES
)
exp()
| 1,194 | 23.387755 | 83 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue488/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (node.endswith("cluster.bc2.ch") or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
# TODO: Once we have reference results, we should add "quality".
# TODO: Add something about errors/exit codes.
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used.
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" % (rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for attribute in valid_attributes:
name = "-".join([rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,741 | 35.614943 | 79 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue488/issue488.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
'astar_gapdb': [
'--search',
'astar(gapdb())'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue488-base", "issue488-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 550 | 18 | 54 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue665/v2.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
import os
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())'],
driver_options=['--search-time-limit', '5m']),
}
exp = IssueExperiment(
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks'),
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
attribute = "total_time"
config_nick = 'astar-blind'
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config_nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config_nick)
)
exp()
main(revisions=['issue665-base', 'issue665-v2'])
| 1,242 | 26.021739 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue665/v1.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
import os
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())'],
driver_options=['--search-time-limit', '5m']),
}
exp = IssueExperiment(
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks'),
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
attribute = "total_time"
config_nick = 'astar-blind'
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config_nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config_nick)
)
exp()
main(revisions=['issue665-base', 'issue665-v1'])
| 1,242 | 26.021739 | 82 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue665/common_setup.py | # -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, benchmarks_dir, suite, revisions=[], configs={},
grid_priority=None, path=None, test_suite=None,
email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used.
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(benchmarks_dir, suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,496 | 33.907821 | 83 | py |
DAAISy | DAAISy-main/dependencies/FD/experiments/issue665/relativescatter.py | # -*- coding: utf-8 -*-
#
# downward uses the lab package to conduct experiments with the
# Fast Downward planning system.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
import os
from lab import tools
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that compares a specific attribute between two
    configurations. The attribute value of config 1 is shown on the x-axis,
    and the ratio of the config 2 value to the config 1 value on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
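        # Worked example of the relative computation above (illustrative only):
        # if a task needs val1 = 10 s under config 1 and val2 = 20 s under
        # config 2, the plotted point is (x, y) = (10, 2.0). For attributes
        # where lower is better (time, memory, expansions), points above y = 1
        # mean config 2 did worse on that task; the y-limits are then widened
        # symmetrically around 1 by the code above.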
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 4,690 | 35.937008 | 84 | py |
DAAISy | DAAISy-main/src/generate_random_states.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
import glob
import pickle
import subprocess
from src.config import *
from src.lattice import *
from src.utils import generate_ds, set_to_state
# DOMAIN = "barman"
def get_state_from_val(domain_file, problem_file, plan_file, objects, init_state):
param = VAL_PATH + " -v " + domain_file + " " + problem_file + " " + plan_file + " > " + GEN_VAL_FILE
p = subprocess.Popen([param], shell=True)
p.wait()
all_states = []
curr_state = set(copy.deepcopy(init_state))
# Scanning the VAL output
f = open(GEN_VAL_FILE, 'r')
for line in f:
# Adding a new policy rule
if "Checking next happening (time " in line or "Plan executed successfully" in line:
_state = State(set_to_state(curr_state), objects)
all_states.append(copy.deepcopy(_state))
if "Deleting " in line:
pred = line.replace("Deleting ", "").replace("(", "").replace(")\n", "").split(" ")
pred = "|".join(pred)
curr_state.discard(pred)
if "Adding " in line:
pred = line.replace("Adding ", "").replace("(", "").replace(")\n", "").split(" ")
pred = "|".join(pred)
curr_state.add(pred)
f.close()
return all_states
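# Sketch of the VAL output lines parsed above (predicate and object names are
# hypothetical): a line "Deleting (on b1 b2)" removes "on|b1|b2" from
# curr_state, "Adding (clear b2)" adds "clear|b2", and a State snapshot is
# appended at every "Checking next happening (time ..." line and once more on
# "Plan executed successfully".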
def run_ff(domain_file, problem_file, output_file):
param = FF_PATH + "ff"
param += " -o " + domain_file
param += " -f " + problem_file
param += " -i 120"
param += " > " + output_file
p = subprocess.Popen([param], shell=True)
p.wait()
return
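# For reference, the shell command assembled above expands to roughly
# (with FF_PATH, domain_file, problem_file and output_file substituted):
#   <FF_PATH>ff -o <domain_file> -f <problem_file> -i 120 > <output_file>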
def main(DOMAIN, DOMAINS_PATH, DOMAIN_FILE, PROBLEM_DIR, RANDOM_STATE_FOLDER, GEN_RESULT_FILE):
domain_file = DOMAINS_PATH + DOMAIN_FILE
problem_file_path = PROBLEM_DIR + "/*.pddl"
problem_file_l = glob.glob(problem_file_path)
problem_file_list = sorted(problem_file_l)
all_states = []
for problem_file in problem_file_list:
_, _, _, _, objects, _, init_state, domain_name = generate_ds(domain_file, problem_file)
run_ff(domain_file, problem_file, GEN_RESULT_FILE)
f = open(GEN_RESULT_FILE, "r")
_plan_found = False
_plan = ""
step = 0
for x in f:
if "found legal plan as follows" in x:
_plan_found = True
if not _plan_found:
continue
if str(step) + ":" in x:
k = copy.deepcopy(x)
_plan += str(step) + " : (" + k.lower().rstrip().split(":")[-1].lstrip() + ")\n"
step += 1
if "time spent" in x:
break
f.close()
f = open(GEN_PLAN_FILE, "w")
f.write(_plan)
f.close()
states = get_state_from_val(domain_file, problem_file, GEN_PLAN_FILE, objects, init_state)
all_states.extend(states)
if len(all_states) >= STORED_STATES_COUNT:
break
with open(RANDOM_STATE_FOLDER + "random_" + DOMAIN + ".pkl", "wb") as f:
pickle.dump(all_states[0:STORED_STATES_COUNT+1], f)
if __name__ == "__main__":
base_dir = os.getcwd()+"/"
DOMAIN = "termes"
DOMAINS_PATH = base_dir+"domains/"+DOMAIN+"/domains/"
DOMAIN_FILE = "domain.pddl"
PROBLEM_DIR = base_dir+"domains/"+DOMAIN+"/instances/"
RANDOM_STATE_FOLDER = base_dir+"/random_states/"
GEN_RESULT_FILE = RANDOM_STATE_FOLDER+"gen_res.txt"
main(DOMAIN, DOMAINS_PATH, DOMAIN_FILE, PROBLEM_DIR, RANDOM_STATE_FOLDER, GEN_RESULT_FILE) | 3,474 | 30.880734 | 105 | py |
DAAISy | DAAISy-main/src/config.py | #!/usr/local/bin/python3
# encoding: utf-8
import os
from enum import Enum, IntEnum
domains = ["blocksworld", "logistics", "parking", "satellite", "barman",
"gripper", "miconic", "rovers", "termes", "freecell"]
domain_dir_gym = "../dependencies/pddlgym/pddl/"
VERBOSE = False
base_dir = os.getcwd()+"/"
domain_name = "temp_domain"
final_result_dir = base_dir+"results/temp/"
final_result_prefix = "final_result_aaai21"
TEMP_FOLDER = final_result_dir+"temp_files/"
Q_DOMAIN_FILE = TEMP_FOLDER+"querymodel_domain.pddl"
Q_PROBLEM_FILE = TEMP_FOLDER+"querymodel_problem.pddl"
Q_PLAN_FILE = TEMP_FOLDER+"querymodel_plan.pddl"
Q_RESULT_FILE = TEMP_FOLDER+"res.txt"
ALL_ACTION_FILE = TEMP_FOLDER+"action_list.txt"
temp_plan_file = TEMP_FOLDER+"querymodel_temp_plan.pddl"
init_state_count = 1
save_random_states = True
VAL_PATH = base_dir+"dependencies/VAL/validate"
FF_PATH = base_dir+"dependencies/FF/"
FD_PATH = base_dir+"dependencies/FD/"
# Set FF for ffPlanner
# Set FD for fdPlanner
PLANNER = "FF"
NUM_PER_DOMAIN = 10
class Location(IntEnum):
PRECOND = 1
EFFECTS = 2
ALL = 3
class Literal(Enum):
AN = -2
NEG = -1
ABS = 0
POS = 1
AP = 2
NP = 3
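# Illustrative note (based on how Literal is used in
# src/model_drift/agent_interrogation_interface.py): an action model stores one
# [precondition_mode, effect_mode] pair per (action, predicate), e.g.
#   actions["pick-up"]["holding|0"] = [Literal.NEG, Literal.POS]
# would mean the predicate must be false before the action and is added by it,
# while Literal.ABS marks a predicate that does not appear in that location.
# The action/predicate names above are hypothetical; AN, AP and NP are not
# covered by this sketch.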
num_random_states = 20
num_additional_states = 20
ignore_list = [[Literal.NEG, Literal.NEG], [Literal.POS, Literal.POS]]
pal_tuples_finalized = []
abs_actions_test = dict()
abs_preds_test = dict()
GEN_RESULT_FILE = TEMP_FOLDER + "gen_res.txt"
GEN_PLAN_FILE = TEMP_FOLDER + "gen_res.plan"
GEN_VAL_FILE = TEMP_FOLDER + "gen_res.val"
RANDOM_STATE_FOLDER = base_dir+"random_states/"
STORED_STATES_COUNT = 60
| 1,614 | 21.123288 | 72 | py |
DAAISy | DAAISy-main/src/agent.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
import importlib
from src.config import *
from src.query import ExecutePlan
from src.lattice import Model
from src.utils import state_to_set
class Agent:
def __init__(self, domain, pred_type_mapping, agent_model_actions):
self.agent_model = Model(pred_type_mapping, agent_model_actions)
self.agent_model.print_model()
def run_query(self, query, pal_tuple_dict, partial_check=False):
"""
        :param query: dict with an 'init_state' (a lattice State whose .state
            holds the initial state) and a 'plan' (the action sequence to run).
        :param pal_tuple_dict: maps (action, predicate, location) tuples to
            whether that pal tuple has already been finalized.
        :return: (is_executable_agent, failure_index, possible_state) as
            reported by ExecutePlan.execute_plan.
"""
plan = ExecutePlan(self.agent_model, query['init_state'].state, query['plan'])
is_executable_agent, possible_state, failure_index = plan.execute_plan(pal_tuple_dict)
return is_executable_agent, failure_index, possible_state
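# Minimal usage sketch (commented out; mirrors how Agent is constructed in
# src/model_drift/agent_interrogation_interface.py, with placeholder file names):
#
#   from src.utils import parse_files
#   (action_parameters, pred_type_mapping, agent_model_actions, abstract_model_actions,
#    objects, types, init_state, domain_name) = parse_files.generate_ds("domain.pddl", "problem.pddl")
#   agent = Agent(domain_name, pred_type_mapping, agent_model_actions)
#   # query['init_state'] is a lattice State, query['plan'] the action sequence to check:
#   executable, failure_index, possible_state = agent.run_query(query, pal_tuple_dict)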
| 812 | 26.1 | 94 | py |
DAAISy | DAAISy-main/src/__init__.py | 0 | 0 | 0 | py |
|
DAAISy | DAAISy-main/src/model_drift/agent_interrogation_interface.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
from itertools import product
from src.config import *
from src.utils import *
from src.agent import Agent
from src.utils import parse_files
from src.interrogation import AgentInterrogation
from src.lattice.model_pddl import Model
from src.model_drift.pa_tuple import PATuple
class AgentInterrogationInterface(object):
def __init__(self, domain_file_path, problem_file_path):
self.domain_file_path = domain_file_path
self.problem_file_path = problem_file_path
self.abstract_model = None
self.abstract_predicates = None
self.data = None
def get_inferred_pals(self, init_model, PAtuple_to_ModeTuple_set_dict):
"""
        Returns the predicates/actions whose mode is uniquely determined by the
        observed mode pairs (PALs with a single valid precondition and/or effect
        mode), the corresponding inferred PAL tuples and modes, and, for every
        observed (action, predicate) pair, the restricted set of valid
        (precondition, effect) mode pairs.
"""
abstract_predicates_inferred, abstract_model_actions_inferred = dict(), dict()
PALtuples_inferred, PALtuples_mode_inferred = list(), list()
PATuple_to_valid_ModeTuple_set_dict = dict()
for PATuple, modepair_set in PAtuple_to_ModeTuple_set_dict.items():
action_name, predicate = PATuple.action, PATuple.predicate
pre, eff = set(), set()
for modepair in modepair_set:
pre.add(modepair[0])
eff.add(modepair[1])
PATuple_to_valid_ModeTuple_set_dict[tuple([action_name, predicate])] = modepair_set
if len(pre)==1 or len(eff)==1:
abstract_predicates_inferred[predicate] = 0
abstract_model_actions_inferred[action_name] = dict()
if len(pre)==1 and len(eff)==1:
abstract_model_actions_inferred[action_name][predicate] = list(modepair)
PALtuples_inferred.append(tuple([action_name, predicate, Location.PRECOND]))
PALtuples_inferred.append(tuple([action_name, predicate, Location.EFFECTS]))
PALtuples_mode_inferred.append(tuple([action_name, predicate, Location.PRECOND, modepair[0]]))
PALtuples_mode_inferred.append(tuple([action_name, predicate, Location.EFFECTS, modepair[1]]))
elif len(pre)==1:
init_modepair = init_model.actions[action_name][predicate]
abstract_model_actions_inferred[action_name][predicate] = [modepair[0], init_modepair[1]]
PALtuples_inferred.append(tuple([action_name, predicate, Location.PRECOND]))
PALtuples_mode_inferred.append(tuple([action_name, predicate, Location.PRECOND, modepair[0]]))
else:
init_modepair = init_model.actions[action_name][predicate]
abstract_model_actions_inferred[action_name][predicate] = [init_modepair[0], modepair[1]]
PALtuples_inferred.append(tuple([action_name, predicate, Location.EFFECTS]))
PALtuples_mode_inferred.append(tuple([action_name, predicate, Location.EFFECTS, modepair[1]]))
return abstract_predicates_inferred, abstract_model_actions_inferred, PALtuples_inferred, PALtuples_mode_inferred, PATuple_to_valid_ModeTuple_set_dict
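    # Illustrative example of the inference above (hypothetical names): if the
    # observations leave {(Literal.POS, Literal.NEG)} as the only valid
    # (pre, eff) pair for ("unstack", "clear|0"), both the precondition and the
    # effect are inferred; if they leave {(Literal.POS, Literal.ABS),
    # (Literal.POS, Literal.NEG)}, only the precondition (POS) is inferred and
    # the effect mode is kept from init_model.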
def get_certainly_changed_pals(self, init_model, PAtuple_to_ModeTuple_set_dict):
"""
        PALs of the initial model whose mode pair is inconsistent with the new observations
"""
PALtuples_changed = list()
for action, predicate_modepair_dict in init_model.actions.items():
for predicate, init_modepair in predicate_modepair_dict.items():
init_modepair = tuple(list(init_modepair))
init_PATuple = PATuple(predicate, action)
if init_PATuple in PAtuple_to_ModeTuple_set_dict.keys():
drifted_modepairs = PAtuple_to_ModeTuple_set_dict[init_PATuple]
if init_modepair not in drifted_modepairs:
# PALs that have certainly changed
# atleast one of pre and eff changed, dropping/marking just pre PATuple
PALtuples_changed.append(tuple([action, predicate, Location.PRECOND]))
# PALtuples_changed.append(tuple([action, predicate, Location.EFFECTS]))
return PALtuples_changed
def get_model_with_dropped_and_fixed_palms(self, init_model, abstract_model_actions_fixed, PALtuples_dropped, PALtuples_dropped_no_obs):
abstract_predicates = dict()
abstract_model_actions = dict()
# first, copy mode tuples for each PA in initial estimate of model
for action, predicate_modepair_dict in init_model.actions.items():
abstract_model_actions[action] = dict()
for predicate, init_modepair in predicate_modepair_dict.items():
abstract_predicates[predicate] = 0
abstract_model_actions[action][predicate] = init_modepair
# second, set mode tuple for certainly changed PA whose actions are observed
for PAL in PALtuples_dropped:
action, predicate = PAL[0], PAL[1]
init_mode_eff = init_model.actions[action][predicate][1]
abstract_model_actions[action][predicate] = [Literal.ABS, init_mode_eff]
        # third, set mode tuple for certainly changed PAs whose actions are not observed
for PAL in PALtuples_dropped_no_obs:
action, predicate = PAL[0], PAL[1]
init_mode_eff = init_model.actions[action][predicate][1]
abstract_model_actions[action][predicate] = [Literal.ABS, Literal.ABS]
# finally, set mode tuple for certainly inferred PA
for action, predicate_modepair_dict in abstract_model_actions_fixed.items():
for predicate, modepair in predicate_modepair_dict.items():
abstract_predicates[predicate] = 0
abstract_model_actions[action][predicate] = modepair
abstract_model = Model(abstract_predicates, abstract_model_actions)
return abstract_model, abstract_predicates
def get_pals_for_action(self, pals, action):
pal_set = set()
for pal in pals:
if pal[0]==action:
pal_set.add(pal)
return pal_set
def compute_abstract_model(self, init_model, PAtuple_to_ModeTuple_set_dict, init_PAtuple_to_ModeTuple_dict, data):
all_pals = set()
for PAtuple in init_PAtuple_to_ModeTuple_dict.keys():
all_pals.add(tuple([PAtuple.action, PAtuple.predicate, Location.PRECOND]))
all_pals.add(tuple([PAtuple.action, PAtuple.predicate, Location.EFFECTS]))
        # from negative examples i.e. PALs corresponding to marked actions found by comparing optimal obs of init and drifted models
marked_pals_no_obs = set()
marked_pals_obs = set()
for action in data["marked_changed_actions"]:
if action in data["actions_with_no_obs"]:
marked_pals_no_obs = marked_pals_no_obs.union(self.get_pals_for_action(all_pals, action))
else:
marked_pals_obs = marked_pals_obs.union(self.get_pals_for_action(all_pals, action))
# from positive examples i.e. available observations
abstract_predicates_inferred, abstract_model_actions_inferred, PALtuples_inferred, PALtuples_mode_inferred, PATuple_to_valid_ModeTuple_set_dict = self.get_inferred_pals(init_model, PAtuple_to_ModeTuple_set_dict)
PALtuples_changed = self.get_certainly_changed_pals(init_model, PAtuple_to_ModeTuple_set_dict)
PALtuples_dropped = set(PALtuples_changed).union(marked_pals_obs) - set(PALtuples_inferred)
PALtuples_dropped_no_obs = marked_pals_no_obs - set(PALtuples_dropped) - set(PALtuples_inferred)
PALtuples_fixed = all_pals.union(set(PALtuples_inferred)) - set(PALtuples_dropped) - set(PALtuples_dropped_no_obs)
self.abstract_model, self.abstract_predicates = self.get_model_with_dropped_and_fixed_palms(init_model, abstract_model_actions_inferred, PALtuples_dropped, PALtuples_dropped_no_obs)
self.data = data
self.data["PALtuples_fixed"] = PALtuples_fixed
self.data["PALtuples_dropped"] = PALtuples_dropped
self.data["PALtuples_dropped_no_obs"] = PALtuples_dropped_no_obs
self.data["PATuple_to_valid_ModeTuple_set_dict"] = PATuple_to_valid_ModeTuple_set_dict
print("\n PALs dropped:")
for PALtuple in PALtuples_dropped:
print(PALtuple)
return self.data
def learn_model_from_scratch(self):
# Run AIA baseline/Learn model from scratch
print("============================= Learning model from scratch (",self.domain_file_path,") ==============================")
action_parameters, pred_type_mapping, agent_model_actions, abstract_model_actions, \
objects, types, init_state, domain_name = parse_files.generate_ds(self.domain_file_path, self.problem_file_path)
agent = Agent(domain_name, pred_type_mapping, agent_model_actions)
abstract_predicates = dict()
abstract_model = Model(abstract_predicates, abstract_model_actions)
iaa_main = AgentInterrogation(agent, abstract_model, objects, domain_name, abstract_predicates, pred_type_mapping, action_parameters, types)
total, unique, failed, repeated, init_running_time, data_dict, pal_tuple_count, pal_tuple_order, valid_models, _ = iaa_main.agent_interrogation_algo()
return total, unique, failed, repeated, valid_models
def learn_model_with_prior(self):
action_parameters, pred_type_mapping, agent_model_actions, abstract_model_actions, \
objects, types, init_state, domain_name = parse_files.generate_ds(self.domain_file_path, self.problem_file_path)
agent = Agent(domain_name, pred_type_mapping, agent_model_actions)
iaa_main = AgentInterrogation(agent, self.abstract_model, objects, domain_name, self.abstract_predicates, pred_type_mapping, action_parameters, types)
# Provide knowledge of pal_tuples_fixed, pal_tuples_set_modes, PALrank, PALorder to AIA
print("Running AIA..")
self.data["query_info"].clear()
total, unique, failed, repeated, running_time, data_dict, pal_tuple_count, pal_tuple_order, valid_models, self.data = iaa_main.agent_interrogation_algo(self.data)
return total, unique, failed, repeated, valid_models, iaa_main
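    # Hedged end-to-end sketch of how this class is typically driven (method
    # names are real, everything else is a placeholder):
    #
    #   interface = AgentInterrogationInterface("drifted_domain.pddl", "problem.pddl")
    #   # observations and valid-mode sets come from ObservationGenerator /
    #   # ValidModesInference elsewhere in this package, then:
    #   data = interface.compute_abstract_model(init_model, PAtuple_to_ModeTuple_set_dict,
    #                                           init_PAtuple_to_ModeTuple_dict, data)
    #   total, unique, failed, repeated, valid_models, iaa = interface.learn_model_with_prior()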
| 10,486 | 55.686486 | 224 | py |
DAAISy | DAAISy-main/src/model_drift/infer_valid_modes.py | #!/usr/local/bin/python3
# encoding: utf-8
import glob
from src.utils.translate import pddl_parser
from src.utils.translate import pddl_fd as pddl
from src.config import Literal
from src.model_drift import PATuple
"""
Computes set of valid modes for an agent's model using positive examples
"""
class ValidModesInference(object):
def __init__(self, lifted_action_statepair_dict, predicates, actions):
self.lifted_action_statepair_dict = lifted_action_statepair_dict
self.predicates = predicates
self.actions = actions
self.predicate_presence_to_mode_tuples = dict()
self.PATuple_predicate_presence_dict = dict()
self.PAtuple_to_ModeTuple_set_dict = dict()
self.initialize_predicate_presence_to_possible_mode_tuples()
def initialize_predicate_presence_to_possible_mode_tuples(self):
"""
A static map of set of observations to set of possible mode tuples (Pre mode, Eff mode) explained by the observations
Observation = tuple of predicate presence (positive or negative) in s and s'
"""
self.predicate_presence_order = [('p','p'), ('p','-p'), ('-p','p'),('-p','-p')]
self.predicate_presence_to_mode_tuples[tuple([('p', 'p')])] = set([(Literal.POS, Literal.ABS), (Literal.ABS, Literal.POS), (Literal.ABS, Literal.ABS)])
self.predicate_presence_to_mode_tuples[tuple([('p', 'p'),('-p','p')])] = set([(Literal.ABS, Literal.POS)])
self.predicate_presence_to_mode_tuples[tuple([('p', 'p'),('-p','-p')])] = set([(Literal.ABS, Literal.ABS)])
self.predicate_presence_to_mode_tuples[tuple([('p', '-p')])] = set([(Literal.POS, Literal.NEG), (Literal.ABS, Literal.NEG)])
self.predicate_presence_to_mode_tuples[tuple([('p', '-p'),('-p', '-p')])] = set([(Literal.ABS, Literal.NEG)])
self.predicate_presence_to_mode_tuples[tuple([('-p', 'p')])] = set([(Literal.NEG, Literal.POS), (Literal.ABS, Literal.POS)])
self.predicate_presence_to_mode_tuples[tuple([('-p', '-p')])] = set([(Literal.NEG, Literal.ABS), (Literal.ABS, Literal.NEG), (Literal.ABS, Literal.ABS)])
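        # Worked example: if predicate p is seen true both before and after one
        # transition of an action (('p','p')) and false-before / true-after in
        # another (('-p','p')), the key (('p','p'), ('-p','p')) maps to the
        # single pair (Literal.ABS, Literal.POS): the precondition can neither
        # require p nor require not-p, and the effect must add p.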
def get_predicates_from_state_with_predicate_name(self, state, predicate_name):
"""
Get set of predicates from state that match the given predicate name
"""
predicates = set()
for predicate in state.literals:
if predicate.name==predicate_name:
predicates.add(predicate)
return predicates
def set_predicate_presence(self, set_predicates, action, presence_tuple):
for predicate in set_predicates:
PATupleObj = PATuple(predicate, action)
if PATupleObj not in self.PATuple_predicate_presence_dict.keys():
self.PATuple_predicate_presence_dict[PATupleObj] = set()
self.PATuple_predicate_presence_dict[PATupleObj].add(presence_tuple)
def compute_PAtuple_to_predicate_presence(self):
"""
Computes a map of Predicate-Action Tuple to set of tuples of predicate presence in state and next state e.g. [('p','p'),('-p','p')] from observations
"""
for action, set_statepair in self.lifted_action_statepair_dict.items():
for predicate_obj in self.predicates:
predicate_name = predicate_obj.name
for statepair in set_statepair:
state_predicates = self.get_predicates_from_state_with_predicate_name(statepair[0], predicate_name)
next_state_predicates = self.get_predicates_from_state_with_predicate_name(statepair[1], predicate_name)
intersection_set_predicates = state_predicates.intersection(next_state_predicates)
left_outer_set_predicates = state_predicates - intersection_set_predicates
right_outer_set_predicates = next_state_predicates - intersection_set_predicates
# TO DO: case in intersection_set_predicates: can't find for (-p, -p)
self.set_predicate_presence(intersection_set_predicates, action, tuple(['p','p']))
self.set_predicate_presence(left_outer_set_predicates, action, tuple(['p','-p']))
self.set_predicate_presence(right_outer_set_predicates, action, tuple(['-p','p']))
order = {t: i for i, t in enumerate(self.predicate_presence_order)}
for PATuple in self.PATuple_predicate_presence_dict.keys():
ordered = sorted(list(self.PATuple_predicate_presence_dict[PATuple]), key=lambda x: order[x])
self.PATuple_predicate_presence_dict[PATuple] = tuple(ordered)
def compute_PAtuple_to_ModeTuple_set_dict(self):
"""
Computes a map of Predicate-Action tuple to mode tuples i.e. (pre mode, eff mode)
"""
for PATuple, predicate_presence in self.PATuple_predicate_presence_dict.items():
self.PAtuple_to_ModeTuple_set_dict[PATuple] = dict()
size = len(self.predicate_presence_to_mode_tuples[tuple(predicate_presence)])
for mode_tuple in self.predicate_presence_to_mode_tuples[predicate_presence]:
self.PAtuple_to_ModeTuple_set_dict[PATuple][mode_tuple] = 1.0/size
def transform_PAtuple(self, PAtuple):
"""
Transform params in Predicate-Action tuple to ids separated by "|" (AIA's requirement)
"""
action_name = PAtuple.action.name
action = action_name
param_to_id = dict()
id_ = 0
for param in PAtuple.action.parameters:
param_to_id[param.name] = id_
action+='|'+str(id_)
id_ += 1
predicate = PAtuple.predicate.name
for arg in PAtuple.predicate.args:
predicate+='|'+str(param_to_id[arg])
return PATuple(predicate, action_name)
def transform_PAtuple_to_ModeTuple_set_dict(self):
"""
        Rebuild PAtuple_to_ModeTuple_set_dict with each Predicate-Action key's params transformed to ids separated by "|" (AIA's requirement)
"""
transformed_PAtuple_to_ModeTuple_set_dict = dict()
for PATuple, modepair_set in self.PAtuple_to_ModeTuple_set_dict.items():
PATuple = self.transform_PAtuple(PATuple)
transformed_PAtuple_to_ModeTuple_set_dict[PATuple] = modepair_set
self.PAtuple_to_ModeTuple_set_dict = transformed_PAtuple_to_ModeTuple_set_dict
def compute_valid_modes(self):
self.compute_PAtuple_to_predicate_presence()
self.compute_PAtuple_to_ModeTuple_set_dict()
self.transform_PAtuple_to_ModeTuple_set_dict()
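    # Minimal usage sketch (the observation dictionary is typically produced by
    # ObservationGenerator in this package; argument names are as in __init__):
    #
    #   vmi = ValidModesInference(lifted_action_statepair_dict, predicates, actions)
    #   vmi.compute_valid_modes()
    #   for pa, modepair_weights in vmi.PAtuple_to_ModeTuple_set_dict.items():
    #       print(pa.action, pa.predicate, modepair_weights)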
| 6,629 | 51.619048 | 161 | py |
DAAISy | DAAISy-main/src/model_drift/generate_observations.py | #!/usr/local/bin/python3
# encoding: utf-8
import os
import copy
import glob
import random
import re
from itertools import product
import pickle
import math
from dependencies.fama.src import planning
from src.utils.translate import pddl_parser
from src.utils.translate import pddl_fd as pddl
from src.utils import *
class ObservationGenerator(object):
def __init__(self, example_dir, domains_dir, data):
self.example_dir = example_dir
self.domains_dir = domains_dir
self.data = data
self.actions = None
def parse_atom_to_literal(self, atom):
if not isinstance(atom, pddl.f_expression.FunctionAssignment) and atom.predicate!="=":
name_ = atom.predicate
args_ = list()
for arg in atom.args:
args_.append(str(arg))
return planning.Literal(name_,args_)
return atom
def apply_action(self, state, action):
next_state = copy.deepcopy(state)
for effect in action.effects:
atom = effect.literal
literal = self.parse_atom_to_literal(atom)
if atom.negated == True:
positive_literal = copy.deepcopy(literal)
positive_literal.negated = False
next_state.delLiteral(positive_literal)
else:
next_state.addLiteral(literal)
return next_state
def get_init_state(self, fd_domain, fd_task):
init_state=planning.State([])
for l in fd_task.init:
if l.predicate!="=":
init_state.addLiteral(self.parse_atom_to_literal(l))
return init_state
def get_type_to_objects(self, fd_task, type_to_objects=None):
if not type_to_objects:
type_to_objects = dict()
for item in fd_task.objects:
if item.type_name not in type_to_objects.keys():
type_to_objects[item.type_name] = set()
type_to_objects[item.type_name].add(item.name)
return type_to_objects
def get_objs_params_maps(self, objs, params):
"""
Get objs to params map and params to objs map
"""
param_to_obj, obj_to_param = dict(), dict()
for i in range(len(params)):
typed_obj = copy.deepcopy(params[i])
param_to_obj[typed_obj.name] = objs[i]
obj_to_param[objs[i]] = typed_obj.name
return param_to_obj, obj_to_param
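        # Illustrative example (hypothetical names): for objs = ["b1", "b2"] and
        # an action whose parameters are ?x and ?y, this yields
        # param_to_obj = {"?x": "b1", "?y": "b2"} and
        # obj_to_param = {"b1": "?x", "b2": "?y"}.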
def replace_parameters_in_action(self, action_to_be_modified, param_to_param):
"""
Replaces params with objs or objs with params i.e. converts grounded_action to lifted_action or other way around
"""
action = copy.deepcopy(action_to_be_modified)
parameters = list()
for param in action.parameters:
param.name = param_to_param[param.name]
parameters.append(param)
action.parameters = parameters
effects = list()
for eff in action.effects:
args = list()
for arg in eff.literal.args:
args.append(param_to_param[arg])
eff.literal.args = tuple(args)
effects.append(eff)
action.effects = effects
precondition = list()
if len(action.precondition.parts)>0:
for pre in action.precondition.parts:
args = list()
for arg in pre.args:
args.append(param_to_param[arg])
pre.args = tuple(args)
precondition.append(pre)
else:
pre = action.precondition
args = list()
for arg in pre.args:
args.append(param_to_param[arg])
pre.args = tuple(args)
precondition.append(pre)
action.precondition = precondition
return action
def get_relevant_parameterized_predicates_set(self, state, obj_to_param):
"""
Get predicates in a state relevant for the action i.e. predicates whose arguments are definitely in the action's arguments
"""
relevant_state = copy.deepcopy(state)
relevant_literals = list()
for literal in state.literals:
relevant_flag = True
for arg in literal.args:
if arg not in obj_to_param.keys():
relevant_flag = False
break
if relevant_flag:
parameterized_literal = copy.deepcopy(literal)
args = list()
for obj in parameterized_literal.args:
args.append(obj_to_param[obj])
parameterized_literal.args = args
relevant_literals.append(parameterized_literal)
relevant_state.literals = relevant_literals
return relevant_state
def get_subplans(self, plan):
subplans = set()
for i in range(len(plan)):
for j in range(i,len(plan)):
subplans.add(tuple(plan[i:j+1]))
return subplans
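        # Worked example: for plan = [t1, t2, t3] (each ti being a
        # (state, action, next_state) triple as built below), the returned set of
        # contiguous subplans is {(t1,), (t2,), (t3,), (t1, t2), (t2, t3), (t1, t2, t3)}.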
def generate_optimal_observations(self, domain_file, problem_dir, observation_dir, problem_file=None, max_obs=math.inf):
"""
        generates an optimal plan for the given problem file (or for every problem in problem_dir when problem_file is None) and
        returns a map from actions to (state, next_state) pairs, a map from lifted actions to relevant parameterized (state, next_state) pairs,
        all optimal plans with their subplans, plus the parsed object types, predicates, actions and the updated data dict
"""
domain_path = self.domains_dir+domain_file
problem_paths = list()
if problem_file:
problem_paths = [problem_dir+problem_file]
else:
            problem_paths = sorted(glob.glob(problem_dir+"*.pddl"), key=lambda x: float(re.findall(r"(\d+)", x)[0]))
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", domain_path)
domain_name, domain_requirements, types, type_dict, constants, \
predicates, predicate_dict, functions, self.actions, axioms = pddl_parser.parsing_functions.parse_domain_pddl(fd_domain)
action_to_statepair_set_dict = dict()
lifted_action_to_relevant_parameterized_statepair_set_dict = dict()
problem_optimal_plan_to_optimal_subplans = dict()
type_to_objects = dict()
i = 0
for problem_path in problem_paths:
problem_id = problem_path.split("/")[-1].split("-")[1].split(".")[0]
observation_file = observation_dir+"plan-"+str(problem_id)
action_parameters, pred_type_mapping, agent_model_actions, abstract_model_actions, \
objects, types, init_state, domain_name = parse_files.generate_ds(domain_path, problem_path)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_path)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
type_to_objects = self.get_type_to_objects(fd_task, type_to_objects)
print("Attempting to generate plan for ",problem_path)
if not os.path.exists(observation_file):
os.chdir(self.example_dir+"../../dependencies/FD/")
cmd = "./fast-downward.py "
cmd+= "--search-time-limit 60 "
cmd+= "--plan-file "+observation_file+" "+domain_path+" "+problem_path+" --search 'astar(lmcut())' > "+observation_dir+"output.txt"
print(cmd)
os.system(cmd)
print("Generated optimal plan from ",domain_path," and\n ",problem_path)
else:
print("Observation file already exists for ",problem_path)
init_state = self.get_init_state(fd_domain, fd_task)
state = copy.deepcopy(init_state)
if os.path.exists(observation_file):
optimal_plan = list()
for line in open(observation_file,"r"):
if ";" not in line:
line = line.replace("(","")
line = line.replace(")","")
action_name = line.split(" ")[0]
objs = line.replace("\n","").split(" ")[1:]
lifted_action = None
for action_obj in self.actions:
if action_obj.name==action_name:
lifted_action = action_obj
params = lifted_action.parameters
param_to_obj, obj_to_param = self.get_objs_params_maps(objs, params)
action = self.replace_parameters_in_action(lifted_action, param_to_obj)
next_state = self.apply_action(state, action)
if action_name not in action_to_statepair_set_dict.keys():
action_to_statepair_set_dict[action_name] = dict()
if action not in action_to_statepair_set_dict[action_name].keys():
action_to_statepair_set_dict[action_name][action] = set()
action_to_statepair_set_dict[action_name][action].add(tuple([state, next_state]))
optimal_plan.append((state, action, next_state))
relevant_parameterized_state = self.get_relevant_parameterized_predicates_set(state, obj_to_param)
relevant_parameterized_next_state = self.get_relevant_parameterized_predicates_set(next_state, obj_to_param)
if lifted_action not in lifted_action_to_relevant_parameterized_statepair_set_dict:
lifted_action_to_relevant_parameterized_statepair_set_dict[lifted_action] = set()
lifted_action_to_relevant_parameterized_statepair_set_dict[lifted_action].add(tuple([relevant_parameterized_state, relevant_parameterized_next_state]))
state = copy.deepcopy(next_state)
i += 1
if i == max_obs:
break
problem_optimal_plan_to_optimal_subplans[(problem_id, tuple(optimal_plan))] = self.get_subplans(optimal_plan)
if i == max_obs:
break
actions_with_no_obs = set()
for action in self.actions:
action_name = action.name
if action_name not in action_to_statepair_set_dict.keys():
actions_with_no_obs.add(action_name)
self.data["actions_with_no_obs"] = actions_with_no_obs
i = 0
string = "Total number of actions:"+str(len(self.actions))+"\n"
string += "Total number of actions in observations:\n"
for action, statepair_set in action_to_statepair_set_dict.items():
string += str(i)+")"+str(action)+":"+str(len(statepair_set))+" states\n"
i += 1
with open(observation_dir+"number_actions.txt","w") as f:
f.write(string)
self.data["action_to_statepair_set"] = action_to_statepair_set_dict
self.data["type_to_objects"] = type_to_objects
self.data["problem_optimal_plan_to_optimal_subplans"] = problem_optimal_plan_to_optimal_subplans
return action_to_statepair_set_dict, lifted_action_to_relevant_parameterized_statepair_set_dict, type_to_objects, predicates, self.actions, self.data
######################## Observation tree for init model ###################################
def generate_observations(self, init_domains_dir, init_domain_file, problem_dir):
"""
        generates a tree with states as nodes and actions as edges to obtain a diverse set of action observations;
        returns a map from grounded actions to (state, next_state) pairs
"""
num_obs = 20
init_domain_path = init_domains_dir+init_domain_file
problem_files = sorted(glob.glob(problem_dir+"*.pddl"))
fd_domain = pddl_parser.pddl_file.parse_pddl_file("domain", init_domain_path)
domain_name, domain_requirements, types, type_dict, constants, \
predicates, predicate_dict, functions, self.actions, axioms = pddl_parser.parsing_functions.parse_domain_pddl(fd_domain)
grounded_action_statepairset_dict = dict()
for i in range(len(problem_files)):
problem_path = problem_files[i]
action_parameters, pred_type_mapping, agent_model_actions, abstract_model_actions, \
objects, types, init_state, domain_name = parse_files.generate_ds(init_domain_path, problem_path)
fd_problem = pddl_parser.pddl_file.parse_pddl_file("task", problem_path)
fd_task = pddl_parser.pddl_file.parsing_functions.parse_task(fd_domain, fd_problem)
type_to_objects = self.get_type_to_objects(fd_task)
init_state = self.get_init_state(fd_domain, fd_task)
grounded_actions = self.get_concrete_actions(init_domain_path, problem_path, self.actions, type_to_objects)
grounded_action_statepairset_dict = self.get_action_statepairset_dict(grounded_action_statepairset_dict, init_state, grounded_actions, num_obs)
self.data["action_to_statepair_set_init"] = grounded_action_statepairset_dict
action_name_to_action = dict()
for action in self.actions:
action_name_to_action[action.name] = action
self.data["action_name_to_action"] = action_name_to_action
return self.data
def generate_problem_for_init_model(self, init_state, goal_state, problem_file, init_problem_path):
string = ""
for line in open(problem_file, 'r'):
if "init" in line or "INIT" in line:
break
string += line
init_string = ""
for literal in init_state.literals:
init_string += "("+literal.name+" "+" ".join(literal.args)+")"
goal_string = ""
for literal in goal_state.literals:
goal_string += "("+literal.name+" "+" ".join(literal.args)+")"
string += "\n(:INIT "+init_string+")"
string += "\n(:goal (AND "+goal_string+"))\n)"
with open(init_problem_path, "w") as f:
f.write(string)
f.close()
def get_state_action_next_state_from_observation_file(self, init_observation_file, init_state):
init_plan = list()
state = copy.deepcopy(init_state)
for line in open(init_observation_file,"r"):
if ";" not in line:
line = line.replace("(","")
line = line.replace(")","")
action_name = line.split(" ")[0]
objs = line.replace("\n","").split(" ")[1:]
lifted_action = None
for action_obj in self.actions:
if action_obj.name==action_name:
lifted_action = action_obj
params = lifted_action.parameters
param_to_obj, obj_to_param = self.get_objs_params_maps(objs, params)
action = self.replace_parameters_in_action(lifted_action, param_to_obj)
next_state = self.apply_action(state, action)
init_plan.append((state, action, next_state))
state = copy.deepcopy(next_state)
return init_plan
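    # Sketch of the plan-file format consumed above (hypothetical contents):
    #   (unstack b1 b2)
    #   (put-down b1)
    #   ; cost = 2 (unit cost)
    # Each non-comment line is stripped of parentheses, split into an action name and its
    # objects, grounded against the matching lifted action, and applied to the current state
    # to produce one (state, action, next_state) triple of the returned plan.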
def get_negative_examples(self, domains_dir, init_domain_file, init_problem_dir, init_observation_dir):
"""
        gives problems solved by the drifted model to the initial model and finds plans for them
        computes a map from plans found by the drifted model to plans found by the initial model for the same problems
"""
init_domain_id = init_domain_file.split(".")[0]
init_domain_path = domains_dir+init_domain_file
self.data["drifted_plan_to_init_plan_same_length"] = dict()
self.data["drifted_plan_to_init_plan_shorter_length"] = dict()
if not os.path.exists(init_problem_dir+init_domain_id+"/"):
os.makedirs(init_problem_dir+init_domain_id+"/")
if not os.path.exists(init_observation_dir+init_domain_id+"/"):
os.makedirs(init_observation_dir+init_domain_id+"/")
if not os.path.exists(init_observation_dir+init_domain_id+"/less_or_same_length_plans_pickle_"+str(len(self.data["problem_optimal_plan_to_optimal_subplans"]))+".obj"):
for tup in self.data["problem_optimal_plan_to_optimal_subplans"]:
problem_id = tup[0]
optimal_plan = tup[1]
problem_file = self.example_dir+"instances/instances/instance-"+str(problem_id)+".pddl"
subplan_id = 1
for subplan in self.data["problem_optimal_plan_to_optimal_subplans"][tup]:
init_problem_file = init_problem_dir+init_domain_id+"/instance-"+str(problem_id)+"-"+str(subplan_id)+".pddl"
init_state, goal_state = subplan[0][0], subplan[-1][2]
self.generate_problem_for_init_model(init_state, goal_state, problem_file, init_problem_file)
init_observation_file = init_observation_dir+init_domain_id+"/plan-"+str(problem_id)+"-"+str(subplan_id)
os.chdir(self.example_dir+"../../dependencies/FD/")
cmd = "./fast-downward.py "
cmd += "--search-time-limit 60 "
cmd += "--plan-file "+init_observation_file+" "+init_domain_path+" "+init_problem_file+" --search 'astar(lmcut())' > "+init_observation_dir+"output.txt"
os.system(cmd)
if os.path.exists(init_observation_file):
print("Generated optimal plan from ",init_domain_path," and\n ",init_problem_file)
init_plan = self.get_state_action_next_state_from_observation_file(init_observation_file, init_state)
if len(init_plan) == len(subplan):
self.data["drifted_plan_to_init_plan_same_length"][(subplan_id, subplan)] = init_plan
elif len(init_plan) < len(subplan):
self.data["drifted_plan_to_init_plan_shorter_length"][(subplan_id, subplan)] = init_plan
subplan_id += 1
f = open(init_observation_dir+init_domain_id+"/less_or_same_length_plans_pickle.obj","wb")
pickle.dump((self.data["drifted_plan_to_init_plan_same_length"],self.data["drifted_plan_to_init_plan_shorter_length"]), f)
f.close()
else:
file = open(init_observation_dir+init_domain_id+"/less_or_same_length_plans_pickle.obj","rb")
object_file = pickle.load(file)
self.data["drifted_plan_to_init_plan_same_length"] = object_file[0]
self.data["drifted_plan_to_init_plan_shorter_length"] = object_file[1]
file.close()
plan_id_to_length_drifted_init_tuple = dict()
plan_id_drifted_plan_to_init_plan_shorter_length = dict()
marked_changed_actions = set()
marked_changed_action_effect = set()
marked_possibly_unchanged_actions = set()
for tup, init_plan in self.data["drifted_plan_to_init_plan_shorter_length"].items():
subplan_id, drifted_plan = tup[0], tup[1]
plan_id_to_length_drifted_init_tuple[subplan_id] = (len(drifted_plan),len(init_plan))
drifted_action_sequence, init_action_sequence = list(), list()
for tup in drifted_plan:
drifted_action_sequence.append(tup[1].name)
for tup in init_plan:
init_action_sequence.append(tup[1].name)
plan_id_drifted_plan_to_init_plan_shorter_length[(subplan_id, tuple(drifted_action_sequence))] = tuple(init_action_sequence)
for i in range(len(drifted_plan)):
if i < len(init_plan)-1:
if init_plan[i][1].name==drifted_plan[i][1].name:
if init_plan[i][2]==drifted_plan[i][2]:
marked_possibly_unchanged_actions.add(init_plan[i][1].name)
else:
marked_changed_actions.add(init_plan[i][1].name)
marked_changed_action_effect.add(init_plan[i][1].name)
else:
# get first changed actions in the plans
marked_changed_actions.add(init_plan[i][1].name)
break
else:
break
self.data["marked_changed_actions"] = marked_changed_actions
if len(marked_changed_actions)>0:
print("yay")
return self.data
def get_concrete_actions(self, domain_path, problem_path, actions, type_to_objects):
concrete_actions = list()
action_to_arg_combi = dict()
for action in actions:
action_name = action.name
type_list = list()
for arg in action.parameters:
type_list.append(arg.type_name)
obj_list_list = list()
for type_ in type_list:
obj_list_list.append(list(type_to_objects[type_]))
arg_combinations = list(product(*obj_list_list))
            action_to_arg_combi[action_name] = arg_combinations
for action in actions:
            operator = action.name
for arg_combi in action_to_arg_combi[operator]:
concrete_action = copy.deepcopy(action)
param_to_obj = dict()
i = 0
for param in concrete_action.parameters:
param_to_obj[param.name] = arg_combi[i]
i += 1
concrete_action = self.replace_parameters(concrete_action,param_to_obj)
concrete_actions.append(concrete_action)
return concrete_actions
def replace_parameters(self, concrete_action, param_to_obj):
parameters = list()
for param in concrete_action.parameters:
param.name = param_to_obj[param.name]
parameters.append(param)
concrete_action.parameters = parameters
effects = list()
for eff in concrete_action.effects:
args = list()
for arg in eff.literal.args:
args.append(param_to_obj[arg])
eff.literal.args = tuple(args)
effects.append(eff)
concrete_action.effects = effects
precondition = list()
if len(concrete_action.precondition.parts)>0:
for pre in concrete_action.precondition.parts:
args = list()
for arg in pre.args:
args.append(param_to_obj[arg])
pre.args = tuple(args)
precondition.append(pre)
else:
pre = concrete_action.precondition
args = list()
for arg in pre.args:
args.append(param_to_obj[arg])
pre.args = tuple(args)
precondition.append(pre)
concrete_action.precondition = precondition
return concrete_action
def is_executable(self, state, action):
for atom in action.precondition:
literal = self.parse_atom_to_literal(atom)
if state.findLiteral(literal)==-1:
return False
return True
def get_action_statepairset_dict(self, action_statepairset_dict, init_state, actions, num_obs):
"""
computes a map of grounded actions to set of state and next state pairs
"""
queue = list()
queue.append(init_state)
completed = False
count = 0
while len(queue)>0 and count<num_obs:
state = queue.pop(0)
for action in actions:
if self.is_executable(state, action):
next_state = self.apply_action(state, action)
action_name = action.name
if action_name not in action_statepairset_dict.keys():
action_statepairset_dict[action_name] = dict()
if action not in action_statepairset_dict[action_name].keys():
action_statepairset_dict[action_name][action] = set()
tup = tuple([state, next_state])
if tup not in action_statepairset_dict[action_name][action]:
action_statepairset_dict[action_name][action].add(tup)
queue.append(next_state)
count += 1
if count==num_obs:
completed = True
break
if completed:
break
return action_statepairset_dict
| 24,651 | 47.054581 | 175 | py |
DAAISy | DAAISy-main/src/model_drift/pa_tuple.py | #!/usr/local/bin/python3
# encoding: utf-8
class PATuple:
def __init__(self, predicate=None, action=None):
self.predicate = predicate
self.action = action
def __str__(self):
return f"PATuple(predicate={self.predicate.__str__()}, action={str(self.action)})"
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return (hash(str(self)) == hash(str(other)))
def __repr__(self):
return str(self)
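# Minimal usage sketch (assumes a predicate object and an action name are available):
#   pa = PATuple(predicate=some_predicate, action="pick-up")
#   seen = {pa}                                   # hashable, so PATuples can live in sets/dicts
#   assert pa == PATuple(predicate=some_predicate, action="pick-up")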
| 480 | 23.05 | 90 | py |
DAAISy | DAAISy-main/src/model_drift/plot.py | #!/usr/local/bin/python3
# encoding: utf-8
import csv
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def plot_1(results, file, title, x_axis_enforce_markings=True):
SMALL_SIZE = 9
MEDIUM_SIZE = 35
BIGGER_SIZE = 45
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=25) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)
    plt.rc('font', family='sans-serif')
fig, ax1 = plt.subplots(figsize=(12, 6.5))
x = [i/int(results["domain_to_total_pals"]) for i in results["num_pals_incorrect"]]
l1 = ax1.plot(x, results["queries_scratch"], color="1", label="Number of Queries by AIA")[0]
l2 = ax1.plot(x, results["queries"], color="tab:blue", label="Number of Queries by DAAISy",linewidth=2.5)[0] #blue
plt.fill_between(x, np.array(results["queries"])-np.array(results["queries_std_dev"]), np.array(results["queries"])+np.array(results["queries_std_dev"]), alpha = 0.20, color="tab:blue") #blue
ax2 = ax1.twinx()
l3 = ax2.plot(x, results["initial_model_accuracy"], color="tab:gray", label="Accuracy of initial model", linestyle="dashdot",linewidth=2.5)[0] #gray
l4 = ax2.plot(x, results["final_model_accuracy"], color="tab:green", label="Accuracy of model computed by DAAISy", linestyle="dashdot",linewidth=2.5)[0] #green
plt.fill_between(x, np.array(results["final_model_accuracy"])-np.array(results["acc_std_dev"]), np.array(results["final_model_accuracy"])+np.array(results["acc_std_dev"]), alpha = 0.20, color="tab:green") #green
plt.fill_between(x, np.array(results["final_model_accuracy"]), np.array(results["initial_model_accuracy"]), alpha = 0.20, color="#884EA0")
if x_axis_enforce_markings:
plt.xticks(np.arange(0.0, 1.2, 0.2))
plt.yticks(np.arange(0.0, 1.05, 0.2))
plt.title(title[0].capitalize()+title[1:])
plt.tight_layout()
plt.savefig(file, bbox_inches='tight', pad_inches = 0)
def write_csv_results_increasing_observations(results, file):
results_to_plot = dict()
x = [int(float(key)) for key in results.keys()]
results_to_plot["queries_scratch"] = [results[num_obs]['queries_scratch'][0] for num_obs in x]
results_to_plot["queries"] = [results[num_obs]['queries'][0] for num_obs in x]
results_to_plot["initial_model_accuracy"] = [results[num_obs]["initial_model_accuracy"][0] for num_obs in x]
results_to_plot["final_model_accuracy"] = [results[num_obs]["final_model_accuracy"][0] for num_obs in x]
results_to_plot["queries_std_dev"] = [results[num_obs]["queries_std_dev"][0] for num_obs in x]
results_to_plot["acc_std_dev"] = [results[num_obs]["acc_std_dev"][0] for num_obs in x]
    csvfile = open(file, 'w')
csvwriter = csv.writer(csvfile)
fields = ["num_obs", "queries_scratch", "queries", "initial_model_accuracy","final_model_accuracy","queries_std_dev","acc_std_dev"]
csvwriter.writerow(fields)
rows = list(zip(x,results_to_plot["queries_scratch"],results_to_plot["queries"],results_to_plot["initial_model_accuracy"],results_to_plot["final_model_accuracy"],results_to_plot["queries_std_dev"],results_to_plot["acc_std_dev"]))
    for row in rows:
        csvwriter.writerow(row)
    csvfile.close()
    return x, results_to_plot
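# Illustrative CSV row produced above (values are hypothetical):
#   num_obs,queries_scratch,queries,initial_model_accuracy,final_model_accuracy,queries_std_dev,acc_std_dev
#   10,104,37,0.71,0.93,2.1,0.015
# read_csv_results_increasing_observations() below reloads exactly these columns.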
def read_csv_results_increasing_observations(csv_file):
df = pd.read_csv(csv_file)
results_to_plot = dict()
x = df["num_obs"].to_list()
results_to_plot["queries_scratch"] = df["queries_scratch"].to_list()
results_to_plot["queries"] = df["queries"].to_list()
results_to_plot["initial_model_accuracy"] = df["initial_model_accuracy"].to_list()
results_to_plot["final_model_accuracy"] = df["final_model_accuracy"].to_list()
results_to_plot["queries_std_dev"] = df["queries_std_dev"].to_list()
results_to_plot["acc_std_dev"] = df["acc_std_dev"].to_list()
return x, results_to_plot
def plot_for_increasing_observations(results, file_name, title):
SMALL_SIZE = 9
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=20) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)
    plt.rc('font', family='sans-serif')
fig, ax1 = plt.subplots()
if results==None:
x, results_to_plot = read_csv_results_increasing_observations(file_name+".csv")
else:
x, results_to_plot = write_csv_results_increasing_observations(results, file_name+".csv")
l1 = ax1.plot(x, results_to_plot["queries_scratch"], color="1", label="Number of Queries by AIA")[0]
l2 = ax1.plot(x, results_to_plot["queries"], color="tab:blue", label="Number of Queries by DAAISy")[0] #blue
plt.fill_between(x, np.array(results_to_plot["queries"])-np.array(results_to_plot["queries_std_dev"]), np.array(results_to_plot["queries"])+np.array(results_to_plot["queries_std_dev"]), alpha = 0.20, color="tab:blue") #blue
ax2 = ax1.twinx()
l3 = ax2.plot(x, results_to_plot["initial_model_accuracy"], color="tab:gray", label="Accuracy of initial model", linestyle="dashdot")[0] #gray
l4 = ax2.plot(x, results_to_plot["final_model_accuracy"], color="tab:green", label="Accuracy of model computed by DAAISy", linestyle="dashdot")[0] #green
plt.fill_between(x, np.array(results_to_plot["final_model_accuracy"])-np.array(results_to_plot["acc_std_dev"]), np.array(results_to_plot["final_model_accuracy"])+np.array(results_to_plot["acc_std_dev"]), alpha = 0.20, color="tab:green") #green
plt.fill_between(x, np.array(results_to_plot["final_model_accuracy"]), np.array(results_to_plot["initial_model_accuracy"]), alpha = 0.20, color="#884EA0")
plt.yticks(np.arange(0.0, 1.05, 0.1))
plt.title(title[0].capitalize()+title[1:])
plt.tight_layout()
plt.savefig(file_name+".png", bbox_inches='tight', pad_inches = 0)
def plot_legend(results, file, title):
SMALL_SIZE = 9
MEDIUM_SIZE = 12
BIGGER_SIZE = 18
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE+2) # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=12) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)
    plt.rc('font', family='sans-serif')
fig, ax1 = plt.subplots(figsize=(10,5))
x = results["num_pals_incorrect"]
l1 = ax1.plot(x, results["queries_scratch"], color="tab:red", label="Number of Queries by AIA")[0]
l2 = ax1.plot(x, results["queries"], color="tab:blue", label="Number of Queries by DAAISy")[0]
plt.fill_between(x, np.array(results["queries"])-np.array(results["queries_std_dev"]), np.array(results["queries"])+np.array(results["queries_std_dev"]), alpha = 0.20, color="tab:blue") #blue
plt.xlabel("Amount of drift")
plt.ylabel("Number of queries")
ax2 = ax1.twinx()
l3 = ax2.plot(x, results["initial_model_accuracy"], color="tab:gray", label="Accuracy of initial model", linestyle="dashdot")[0] #gray
l4 = ax2.plot(x, results["final_model_accuracy"], color="tab:green", label="Accuracy of model computed by DAAISy", linestyle="dashdot")[0] #green
plt.fill_between(x, np.array(results["final_model_accuracy"])-np.array(results["acc_std_dev"]), np.array(results["final_model_accuracy"])+np.array(results["acc_std_dev"]), alpha = 0.20, color="tab:green") #green
l5 = plt.fill_between(x, np.array(results["final_model_accuracy"]), np.array(results["initial_model_accuracy"]), alpha = 0.20, color="#884EA0")
plt.ylabel("Accuracy of model")
lines = [l1, l2, l5, l3, l4]
legends = ["Number of queries by AIA","Number of queries by DAAISy","Accuracy gained by DAAISy", "Accuracy of initial model", "Accuracy of model computed by DAAISy"]
plt.legend(lines, legends, bbox_to_anchor=(0.0, 1.0, 1.0, 1.0), loc=3, ncol=2, mode="expand", borderaxespad=0.5)
plt.gca().set_axis_off()
plt.tight_layout()
plt.savefig(file)
def get_init_domain_files(domain_name, generate_init_domains_type, interval):
init_domain_files = list()
num_incorrect_pals_list = list()
if generate_init_domains_type == 0:
interval = math.floor(domain_to_total_pals[domain_name]/num_pals)
for i in range(1,domain_to_total_pals[domain_name],interval):
num_incorrect_pals_list.append(i)
for j in range(num_files_each_pal):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
elif generate_init_domains_type == 1:
interval = 1
for i in range(1,domain_to_total_pals_increased_applicability[domain_name], interval):
num_incorrect_pals_list.append(i)
for j in range(num_files_each_pal):
# num_incorrect_pals_list.append(i)
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
else:
for i in range(1,domain_to_total_pals[domain_name],interval):
num_incorrect_pals_list.append(i)
if i < domain_to_total_pals_increased_applicability[domain_name]:
for j in range(int(num_files_each_pal/2)):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
for j in range(int(num_files_each_pal/2),int(num_files_each_pal)):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
last_reduced_capability_num_dropped_pals = i
else:
for j in range(num_files_each_pal):
init_domain_files.append("domain_"+str(i)+"_"+str(j)+".pddl")
return num_incorrect_pals_list
def read_for_reduced(df, domain_to_total_pals, domain_name, num_incorrect_pals_list, n, num_files_each_pal):
final_results = dict()
final_results["num_pals_incorrect"] = list()
final_results["initial_model_accuracy"] = list()
final_results["final_model_accuracy_inc"] = list()
final_results["final_model_accuracy_red"] = list()
final_results["queries_scratch"] = list()
final_results["queries"] = list()
results = dict()
results["initial_accuracy"] = df["InitAccuracy"].tolist()[:n*num_files_each_pal]
results["final_avg_accuracy"] = df["FinalAccuracy"].tolist()[:n*num_files_each_pal]
results["queries_scratch"] = df["#UniqueQueriesAIA"].tolist()[:n]
results["queries"] = df["Final#UniqueQueries"].tolist()[:n*num_files_each_pal]
final_results["domain_to_total_pals"] = domain_to_total_pals[domain_name]
final_results["num_pals_incorrect"] = num_incorrect_pals_list[:n]
i = 0
for k in range(len(num_incorrect_pals_list[:n])):
final_results["initial_model_accuracy"].append(np.sum(results["initial_accuracy"][i:i+num_files_each_pal])/num_files_each_pal)
final_results["final_model_accuracy_inc"].append(np.sum(results["final_avg_accuracy"][i:i+int(num_files_each_pal/2)])/(num_files_each_pal/2))
final_results["final_model_accuracy_red"].append(np.sum(results["final_avg_accuracy"][i+int(num_files_each_pal/2):i+num_files_each_pal])/(num_files_each_pal/2))
final_results["queries_scratch"] = results["queries_scratch"]
final_results["queries"].append(np.sum(results["queries"][i:i+num_files_each_pal])/num_files_each_pal)
i += num_files_each_pal
return final_results
def read(df, npals, domain_name, num_incorrect_pals_list, num_files_each_pal):
final_results = dict()
final_results["num_pals_incorrect"] = list()
final_results["initial_model_accuracy"] = list()
final_results["final_model_accuracy"] = list()
final_results["acc_std_dev"] = list()
final_results["queries_std_dev"] = list()
final_results["queries_scratch"] = list()
final_results["queries"] = list()
results = dict()
results["initial_accuracy"] = df["InitAccuracy"].tolist()
results["final_avg_accuracy"] = df["FinalAccuracy"].tolist()
results["queries_scratch"] = df["#UniqueQueriesAIA"].tolist()
results["queries"] = df["Final#UniqueQueries"].tolist()
final_results["domain_to_total_pals"] = npals
final_results["num_pals_incorrect"] = num_incorrect_pals_list
i = 0
for k in range(len(num_incorrect_pals_list)):
final_results["initial_model_accuracy"].append(np.sum(results["initial_accuracy"][i:i+num_files_each_pal])/num_files_each_pal)
final_results["final_model_accuracy"].append(np.sum(results["final_avg_accuracy"][i:i+int(num_files_each_pal)])/(num_files_each_pal))
final_results["acc_std_dev"].append(np.std(results["final_avg_accuracy"][i:i+int(num_files_each_pal)]))
final_results["queries_scratch"].append(results["queries_scratch"][i:i+num_files_each_pal][0])
final_results["queries"].append(np.sum(results["queries"][i:i+num_files_each_pal])/num_files_each_pal)
final_results["queries_std_dev"].append(np.std(results["queries"][i:i+num_files_each_pal])/num_files_each_pal)
i += num_files_each_pal
return final_results
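# The two readers above aggregate per-seed rows: each drift level contributes
# num_files_each_pal consecutive rows, which are averaged (with standard deviations recorded
# in read()) to yield one point per drift level; read_for_reduced() additionally splits each
# group into increased- and reduced-capability halves.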
if __name__=="__main__":
domains = ["satellite","rovers","blocksworld","miconic","gripper","termes"]
filename = {"satellite":"satellite1643143805.6418498_0",\
"rovers": "rovers1643142163.6327097_0",\
"blocksworld": "blocksworld1643134731.9598577_0",\
"miconic": "miconic1643139738.103667_0",\
"gripper": "gripper1643137067.3590012_0",\
"termes": "termes1643148544.1542282_0"}
for domain_name in domains:
name = filename[domain_name]
generate_init_domains_type = int(name.split("_")[1])
csv_file = os.getcwd()+"/results/"+str(domain_name)+"/"+name
domain_to_total_pals = {'blocksworld': 52, 'gripper': 20, 'miconic': 36, 'satellite': 50, 'parking': 72, 'rovers': 402, 'termes':134}
# domain_to_total_pals_increased_applicability = {'blocksworld': 14, 'gripper':7, 'miconic':8, 'satellite': 13, 'rovers':43, 'parking':19, 'termes':33} #len(PAL_list_increased_applicability)-len(actions)
domain_to_total_pals_increased_applicability = {'blocksworld': 5, 'gripper':3, 'miconic':5, 'satellite': 9, 'rovers':36, 'parking':19, 'termes':26, 'freecell':81, 'logistics':25} #len(PAL_list_increased_applicability)-len(actions)
domains_mix_intervals = {'blocksworld': 2, 'gripper':1, 'miconic':1, 'satellite': 2, 'rovers':4, 'parking':2, 'termes':4}
npals = domain_to_total_pals[domain_name]
domain_to_num_sas = {'blocksworld': 10, 'gripper':10, 'miconic':10, 'satellite': 10, 'rovers':10, 'termes':10}
domain_to_num_files_each_pal = {"blocksworld":6,"gripper":6,"miconic":6,"satellite":6,"rovers":6,"termes":2} # 6 for all, 2 for freecell and termes
num_files_each_pal = domain_to_num_files_each_pal[domain_name]
num_pals = 20
n = 15
plot_for_reduced = False
num_incorrect_pals_list = get_init_domain_files(domain_name, generate_init_domains_type, domains_mix_intervals[domain_name])
if domain_name in ['satellite','parking','rovers','termes']:
fields = ["init_domain", "#TotalPALs", "(#)InitPALsIncorrect", "(#)PAsDropped", "(#)PALsDropped_noObs", "(#)FinalAvgPALsIncorrect", \
"#TotalActions","(#)InitActionsIncorrect","(#)ActionsObserved", "(#)CompleteActionsDropped","(#)FinalActionsIncorrect", \
"InitAccuracy", "FinalAccuracy", "#UniqueQueriesAIA", "Final#UniqueQueries", "#ValidModels"]
if domain_name in ["blocksworld","miconic"]:
fields = ["init_domain", "#TotalPALs", "(#)InitPALsIncorrect", "(#)PAsDropped", "(#)PALsDropped_noObs", "(#)FinalAvgPALsIncorrect", \
"#TotalActions","(#)InitActionsIncorrect","(#)ActionsObserved", "(#)MarkedChangedActions","(#)FinalActionsIncorrect", \
"InitAccuracy", "FinalAccuracy", "#UniqueQueriesAIA", "Final#UniqueQueries", "#ValidModels"]
if domain_name in ["gripper"]:
fields = ["init_domain", "#TotalPALs", "(#)InitPALsIncorrect", "(#)PALsDropped_noObs", "(#)FinalAvgPALsIncorrect", \
"#TotalActions","(#)InitActionsIncorrect","(#)ActionsObserved", "(#)MarkedChangedActions","(#)FinalActionsIncorrect", \
"InitAccuracy", "FinalAccuracy", "#UniqueQueriesAIA", "Final#UniqueQueries", "#ValidModels"]
fields = ["init_domain", "#TotalPALs", "(#)InitPALsIncorrect", "(#)PAsDropped", "(#)PALsDropped_noObs", "(#)FinalAvgPALsIncorrect", \
"#TotalActions","(#)InitActionsIncorrect","(#)ActionsObserved", "(#)CompleteActionsDropped","(#)FinalActionsIncorrect", \
"InitAccuracy", "FinalAccuracy", "#UniqueQueriesAIA", "Final#UniqueQueries", "#ValidModels"]
df = pd.read_csv(csv_file+".csv", usecols=fields)
if plot_for_reduced==False:
final_results = read(df, npals, domain_name, num_incorrect_pals_list, num_files_each_pal)
else:
final_results = read_for_reduced(df, domain_to_total_pals, domain_name, num_incorrect_pals_list, n, num_files_each_pal)
final_results["domain_to_total_pals"] = domain_to_total_pals[domain_name]
x_axis_enforce_markings = True
if generate_init_domains_type==1:
x_axis_enforce_markings = False
plot_1(final_results, csv_file+".png", domain_name[0].capitalize()+domain_name[1:]+" (#Pals = "+str(domain_to_total_pals[domain_name])+")",x_axis_enforce_markings)
| 18,277 | 61.170068 | 247 | py |
DAAISy | DAAISy-main/src/model_drift/__init__.py | from .pa_tuple import PATuple
from .generate_observations import ObservationGenerator
from .infer_valid_modes import ValidModesInference
from .agent_interrogation_interface import AgentInterrogationInterface
| 208 | 40.8 | 70 | py |
DAAISy | DAAISy-main/src/lattice/model_pddl.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
from collections import OrderedDict
from itertools import permutations, combinations, product
from src.config import *
class State:
def __init__(self, state, objects):
self.state = state
self.objects = objects
def __str__(self):
return str(self.state) + str(self.objects)
class Model(object):
"""
    This class defines the AI planning model that the agent is assumed to have.
Each model is defined in terms of predicates and actions.
:param predicates: dictionary of predicates and their parameters
:type predicates: dict of (str, int)
:param actions: dictionary of actions (action, dict(predicate,[pre,eff]))
:type actions: dict of (str, dict(str, list))
For each predicate in an action, pre and eff are 0 or 1.
0 means, that predicate appears as a negative literal,
1 means, that predicate appears as a positive literal.
"""
def __init__(self, predicates, actions):
"""
This method creates a new instance of Model.
"""
self.predicates = {}
for p in predicates:
self.predicates[p] = predicates[p]
self.actions = {}
for a in actions:
self.actions[a] = actions[a]
self.discarded = False
def __eq__(self, other):
return (self.predicates == other.predicates) and \
(self.actions == other.actions)
def __hash__(self):
return hash(tuple(self.actions))
def __ne__(self, other):
return not self.__eq__(other)
def print_predicates(self):
"""
This method prints the details of the predicates of the model.
.. note::
| Output format will be:
| -------------------------Predicates-------------------------
| Predicate Name | Number of Parameters
| -------------------------------- -----------------------------------
:rtype: None
"""
print("\n\n------------------Predicates------------------\n")
print("Predicate Name | Number of Parameters")
print("----------------------------------------------")
space_to_leave = len("Predicate Name ")
for key, value in self.predicates.items():
print(key, end="")
length = len(str(key))
for i in range(space_to_leave - length):
print(" ", end="")
print("| " + str(value))
print("----------------------------------------------\n")
def print_actions(self):
"""
This method prints the details of the actions of the model.
.. note::
| Output format will be:
| ---------------Actions---------------
| Action Name | Predicates
| ------------------- ----------------------
:rtype: None
"""
print("--------------------Actions-------------------\n")
print("Action Name | Predicates")
print("----------------------------------------------")
space_to_leave = len("Predicate Name ")
for key, preds in self.actions.items():
print(key, end="")
length = len(str(key))
for i in range(space_to_leave - length):
print(" ", end="")
print("| pre :", end="")
print_comma = False
for pred, value in preds.items():
if not print_comma:
print_comma = True
else:
print(",", end="")
if value[0] == Literal.NEG:
print(" !" + str(pred), end="")
elif value[0] == Literal.POS:
print(" " + str(pred), end="")
else:
print_comma = False
print("")
for i in range(space_to_leave):
print(" ", end="")
print("| eff :", end="")
print_comma = False
for pred, value in preds.items():
if not print_comma:
print_comma = True
else:
print(",", end="")
if value[1] == Literal.NEG:
print(" !" + str(pred), end="")
elif value[1] == Literal.POS:
print(" " + str(pred), end="")
else:
print_comma = False
print("")
print("----------------------------------------------\n\n")
def print_model(self):
"""
This method prints the details of the model.
:rtype: None
"""
self.print_predicates()
self.print_actions()
def update_actions(self, new_actions):
"""
This method updates the mapping of actions to predicates.
:param new_actions:
:type new_actions:
:return: true for success, false otherwise
:rtype: bool
"""
for a in new_actions:
self.actions[a] = new_actions[a]
def add_predicates(self, predicates, action_pred_dict, actions):
"""
This method adds the predicates to the model's predicate list.
:param predicates: list of predicates to be removed
:type predicates: list of str
:param action_pred_dict:
:type action_pred_dict:
:param actions:
:type actions:
:rtype: None
"""
for p in predicates:
self.predicates[p] = predicates[p]
for a in actions:
all_poss_preds = action_pred_dict[a]
action_preds = list(filter(lambda x: p in x, all_poss_preds))
if p in action_preds:
self.actions[a].update({p: [Literal.ABS, Literal.ABS]})
def write_model_to_file(self, fd, domain_name, pred_type_mapping, action_parameters, objects=None):
"""
        This method writes the model as a PDDL domain to the given file.
:param fd: file descriptor of the pddl file in which model will be written
:type fd: file descriptor
:param domain_name: domain name of the model
:type domain_name: str
:param pred_type_mapping:
:type pred_type_mapping:
:param action_parameters:
:type action_parameters:
:param objects:
:type objects:
:rtype: None
"""
if objects is None:
objects = dict()
fd.write("(define (domain " + domain_name + ")\n")
fd.write("(:requirements :strips :typing :equality)\n")
# Typing
fd.write("(:types")
for t in objects.keys():
fd.write(" " + t)
fd.write(")\n")
# Predicates
fd.write("(:predicates ")
count = 0
preds_printed = []
for key, value in self.predicates.items():
params = ""
cnt = 0
pred_name = key.split("|")[0]
if pred_name in preds_printed:
continue
else:
preds_printed.append(pred_name)
if pred_name.split("_")[-1] in ["1", "2"]:
actual_pred_name_splits = pred_name.split("_")[0:-1]
actual_pred_name = '_'.join(actual_pred_name_splits)
else:
actual_pred_name = pred_name
for val in pred_type_mapping[actual_pred_name]:
params = params + " ?" + val[0] + str(cnt) + " - " + val
cnt += 1
if count > 0:
fd.write("\n")
for k in range(len("(:predicates ")):
fd.write(" ")
fd.write("(" + pred_name + params + ")")
count += 1
fd.write(")\n\n")
# Actions
for actionName, predicateDict in self.actions.items():
head = "(:action " + actionName + "\n" + " :parameters"
fd.write(head)
type_count = {}
param_ordering = []
for p in action_parameters[actionName]:
if p not in type_count.keys():
type_count[p] = 1
else:
type_count[p] = type_count[p] + 1
param_ordering.append(p + str(type_count[p]))
fd.write(" (")
head = ""
param_count = len(action_parameters[actionName])
for i in range(param_count):
if i > 0:
for k in range(len(" :parameters (")):
head += " "
head += "?" + param_ordering[i] + " - " + action_parameters[actionName][i] + "\n"
for k in range(len(" :parameters ")):
head += " "
head += ")\n"
fd.write(head)
fd.write(" :precondition (and")
equality_needed = False
if param_count > 1:
equality_needed = True
if equality_needed:
combs = combinations(list(range(0, param_count)), 2)
for c in combs:
fd.write("(not (= ")
for j in range(2):
i = c[j]
fd.write("?" + param_ordering[i])
if j == 0:
fd.write(" ")
else:
fd.write(")) ")
for predicate, value in predicateDict.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if t_value[0] != Literal.ABS:
param = " ("
if t_value[0] == Literal.NEG:
param += "not ("
elif t_value[0] == Literal.AN:
param += "0/- ("
elif t_value[0] == Literal.AP:
param += "0/+ ("
elif t_value[0] == Literal.NP:
param += "+/- ("
param += pred_name
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
param += " ?" + param_ordering[int(p)]
param += ")"
if t_value[0] != Literal.ABS and t_value[0] != Literal.POS:
param += ")"
fd.write(param)
fd.write(")\n")
fd.write(" :effect (and")
for predicate, value in predicateDict.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
if value[1] != Literal.ABS:
param = " ("
if value[1] == Literal.NEG:
param += "not ("
param += pred_name
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
param += " ?" + param_ordering[int(p)]
param += ")"
if value[1] == Literal.NEG:
param += ")"
fd.write(param)
fd.write("))\n\n")
fd.write(")\n")
class Lattice(object):
"""
This class defines the lattice where each node (class LatticeNode)
is a collection of models (class Models).
"""
def __init__(self):
self.nodes = {}
# Refinement = 0 means precondition refined first
# Refinement = 1 means effects refined first
self.refinement = Location.EFFECTS
def add_node(self, node_id, node):
self.nodes[node_id] = node
class LatticeNode(object):
"""
    This class defines a node of the lattice, which stores a collection of models.
Each model is defined in terms of predicates and actions.
:param models: list of models
:type models: list of Model objects
:param predicates: dictionary of predicates and their parameters
:type predicates: dict of (str, int)
"""
def __init__(self, lattice, models, predicates, action_pred_dict=None):
"""
This method creates a new instance of Model.
"""
if action_pred_dict is None:
action_pred_dict = {}
self.models = models
self.predicates = list(predicates.keys())
self.id = hash(tuple(sorted(self.predicates)))
self.lattice = lattice
self.lattice.add_node(self.id, self)
self.action_pred_dict = action_pred_dict
def add_models(self, models):
temp_models = self.models
for i in range(len(temp_models)):
temp_models[i].discarded = False
for m in models:
discarded = m.discarded
m.discarded = False
if m in temp_models:
temp_models.remove(m)
m.discarded = discarded
self.models.append(m)
@staticmethod
def act_pred_mapping(action_list, ref, modes=(Literal.ABS, Literal.NEG, Literal.POS)):
pre_list = [[Literal.ABS]]
eff_list = [[Literal.ABS]]
if ref == Location.ALL or ref == Location.PRECOND:
pre_list = [list(i) for i in product(modes)]
if ref == Location.ALL or ref == Location.EFFECTS:
eff_list = [list(i) for i in product(modes)]
pre_eff_list = [list(i[0] + i[1]) for i in list(product(pre_list, eff_list))]
# Stores which action will have the predicate with what precondition and effect
action_mapping = []
pred_list = list(product(pre_eff_list, repeat=len(action_list)))
act_list = list(combinations(action_list, len(action_list)))
# Mapping of actions to predicates' variations
action_mapping.append(list(product(act_list, pred_list)))
return list(action_mapping)
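    # act_pred_mapping enumerates every way a single predicate can appear in the given actions:
    # for each action it takes the cross product of candidate precondition modes and effect
    # modes (restricted by `ref`), e.g. with the default three modes each action gets up to
    # 3 x 3 = 9 [pre, eff] combinations, and these are then combined across all actions.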
@staticmethod
def generate_preds_for_action(predicate, action, pred_type_mapping, action_parameters):
for p in pred_type_mapping[predicate]:
if p not in action_parameters[action]:
return None
need_multiple_mapping = False
pred_type_count = {}
for t in action_parameters[action]:
if t not in pred_type_count.keys():
pred_type_count[t] = action_parameters[action].count(t)
else:
continue
if pred_type_count[t] > 1 and t in pred_type_mapping[predicate]:
need_multiple_mapping = True
if pred_type_mapping[predicate].count(t) > 1:
need_multiple_mapping = True
if not need_multiple_mapping:
updated_predicate = str(predicate)
for p in pred_type_mapping[predicate]:
if p in action_parameters[action]:
updated_predicate += "|" + str(action_parameters[action].index(p))
else:
print("Error")
exit(1)
return [updated_predicate]
else:
type_combination_dict = OrderedDict()
for t in pred_type_mapping[predicate]:
if pred_type_count[t] > 1 and t not in type_combination_dict.keys():
type_count_in_predicate = pred_type_mapping[predicate].count(t)
# Locations of type t in action_parameters[action]'s paramteres
pred_locations = [i for i, x in enumerate(action_parameters[action]) if x == t]
type_combinations = permutations(pred_locations, type_count_in_predicate)
type_combination_dict[t] = list(type_combinations)
final_combinations = list(product(*list(type_combination_dict.values())))
updated_predicate_list = []
for comb in final_combinations:
pred_type_count_temp = {}
updated_predicate = str(predicate)
to_remove = [] # to store preds like on|0|0
for p in pred_type_mapping[predicate]:
if p not in pred_type_count_temp.keys():
pred_type_count_temp[p] = 0
else:
pred_type_count_temp[p] = pred_type_count_temp[p] + 1
if p not in type_combination_dict.keys():
updated_predicate += "|" + str(action_parameters[action].index(p))
if pred_type_mapping[predicate].count(p) > 1:
to_remove.append(updated_predicate)
else:
index_to_search = list(type_combination_dict.keys()).index(p)
updated_predicate += "|" + str(comb[index_to_search][pred_type_count_temp[p]])
updated_predicate_list.append(updated_predicate)
for r in to_remove:
if r in updated_predicate_list:
updated_predicate_list.remove(r)
return updated_predicate_list
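    # Example of the returned encoding (hypothetical blocksworld-style input): for a predicate
    # on(?x - block, ?y - block) and an action stack(?b1 - block, ?b2 - block), this returns
    # parameter-bound names such as ["on|0|1", "on|1|0"], where each integer indexes into the
    # action's parameter list.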
def get_specific_children(self, model, predicate, ref, action, modes):
child_predicates = self.predicates
child_predicates.append(predicate)
child_id = hash(tuple(sorted(child_predicates)))
child_models = []
action_mapping = self.act_pred_mapping([action], ref, modes)
for i in action_mapping:
for actionNames, mappings in i:
new_child = copy.deepcopy(model)
new_child.add_predicates({predicate: 0}, self.action_pred_dict, [action])
_update_actions = {}
for j in range(len(actionNames)):
action_predicates = copy.deepcopy(model.actions[actionNames[j]])
if predicate not in action_predicates.keys():
action_predicates[predicate] = mappings[j]
else:
temp_mapping = action_predicates[predicate]
if ref == Location.PRECOND:
temp_mapping[0] = mappings[j][0]
elif ref == Location.EFFECTS:
temp_mapping[1] = mappings[j][1]
action_predicates[predicate] = temp_mapping
if action_predicates[predicate] in ignore_list or model.discarded:
new_child.discarded = True
_update_actions[actionNames[j]] = action_predicates
new_child.update_actions(_update_actions)
child_models.append(new_child)
# Assuming that we will never need child
if child_id in self.lattice.nodes.keys():
child_node = self.lattice.nodes[child_id]
child_node.add_models(child_models)
else:
pred_dict = {}
for p in child_predicates:
pred_dict[p] = 0
child_node = LatticeNode(self.lattice, child_models, pred_dict, self.action_pred_dict)
return child_node
def get_model_partitions(self, model, predicate, ref, action, modes):
child_node = self.get_specific_children(model, predicate, ref, action, modes)
child_models = child_node.models
return child_models
| 19,309 | 34.892193 | 103 | py |
DAAISy | DAAISy-main/src/lattice/__init__.py | from .model_pddl import Lattice
from .model_pddl import LatticeNode
from .model_pddl import Model
from .model_pddl import State
| 128 | 24.8 | 35 | py |
DAAISy | DAAISy-main/src/utils/parse_files.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
import itertools
import os
import subprocess
from collections import OrderedDict
import numpy as np
from .parser import PDDLDomainParser, structs
from .translate import pddl_fd as pddl
from .translate import pddl_parser
from ..config import *
from . import FileUtils
def extract_task(domain_file_path, problem_file_path):
# Extract the domain specific args.
domain_pddl = pddl_parser.pddl_file.parse_pddl_file(
"domain", domain_file_path)
domain_name, \
domain_requirements, \
types, \
type_dict, \
constants, \
predicates, \
predicate_dict, \
functions, \
actions, \
axioms = pddl_parser.parsing_functions.parse_domain_pddl(
domain_pddl)
task_pddl = pddl_parser.pddl_file.parse_pddl_file(
"task", problem_file_path)
task_name, \
task_domain_name, \
task_requirements, \
objects, \
init, \
goal, \
use_metric = pddl_parser.parsing_functions.parse_task_pddl(
task_pddl, type_dict, predicate_dict)
assert domain_name == task_domain_name
requirements = pddl.Requirements(sorted(set(
domain_requirements.requirements +
task_requirements.requirements)))
objects = constants + objects
pddl_parser.parsing_functions.check_for_duplicates(
[o.name for o in objects],
errmsg="error: duplicate object %r",
finalmsg="please check :constants and :objects definitions")
#############################
init += [pddl.Atom("=", (obj.name, obj.name))
for obj in objects]
#############################
task = pddl.Task(domain_name,
task_name,
requirements,
types,
objects,
predicates,
functions,
init,
goal,
actions,
axioms,
use_metric)
return task
def check_nested(test_dict):
for key1, val in test_dict.items():
for key2 in test_dict.keys():
if key2 in val and key1 != 'object':
return True, key2, key1
return False, None, None
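# Illustrative behaviour (hypothetical type hierarchy): with
#   reverse_types = {"vehicle": ["truck"], "truck": ["semi", "pickup"]}
# check_nested(reverse_types) returns (True, "truck", "vehicle"); the caller then replaces
# "truck" inside reverse_types["vehicle"] with truck's own subtypes, repeating until no key
# appears in another key's subtype list.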
class PredicateDetails:
def __init__(self, literal, param_dict, predTypeMapping):
name_set = False
self.param_matching = OrderedDict()
try:
for param in literal.variables:
self.param_matching[param.name] = param_dict[param.name]
except KeyError as e:
print("KeyError")
self.isnegative = literal.is_anti
if literal.is_negative == True and literal.is_anti == False:
self.isnegative = True
for pred in predTypeMapping:
# == instead of in as it causes error when predicates of the form p and not_p are present
if literal.predicate.name==pred and sorted(list(self.param_matching.values())) == sorted(list(predTypeMapping[pred])):
self.name = pred
name_set = True
if not name_set:
self.name = literal.predicate.name
print("pred not found")
def __str__(self):
return self.name + "(" + str(self.param_matching) + ")" + str(self.isnegative)
class ActionDetails:
def __init__(self, action, param_types, predTypeMapping):
self.name = action.name
self.precondition = []
self.effects = []
self.param_matching = OrderedDict()
self.precondition_literal_names = []
self.add_effects_literal_names = []
self.del_effects_literal_names = []
for i, param in enumerate(action.params):
self.param_matching[param.name] = param_types[i]
try:
if isinstance(action.preconds, structs.LiteralConjunction):
[self.precondition.append(PredicateDetails(lit, self.param_matching, predTypeMapping)) for lit in
action.preconds.literals]
[self.precondition_literal_names.append(p.name) for p in self.precondition]
elif isinstance(action.preconds, structs.Literal):
self.precondition.append(PredicateDetails(action.preconds, self.param_matching, predTypeMapping))
else:
print("Some other action precondition type")
except AttributeError as e:
print("Attribute Error!")
for lit in action.effects.literals:
self.effects.append(PredicateDetails(lit, self.param_matching, predTypeMapping))
for p in self.effects:
if p.isnegative:
self.del_effects_literal_names.append(p.name)
else:
self.add_effects_literal_names.append(p.name)
def __str__(self):
return self.name + "\nParams: [\n" + str(self.params) + "\n]\n Precond:[\n " + str(
self.precondition) + "\n] Add_effects:[\n" + str(self.add_effects) + "\n] Del_effects:[\n" + str(
self.del_effects)
def generate_ds(domain_file, problem_file):
task = extract_task(domain_file, problem_file)
domain_parser = PDDLDomainParser(domain_file)
domain_parser._parse_domain()
##########pddlgym's parser###############
predicates = domain_parser.predicates
operators = domain_parser.operators
#########################################
predTypeMapping = {}
absActParamType = {}
reverse_types = {}
types = {}
objects = {}
init_state = []
################reverse typ mapping#######################
for typ in task.types:
if typ.basetype_name != None:
if typ.basetype_name not in reverse_types.keys():
reverse_types[typ.basetype_name] = [typ.name]
else:
reverse_types[typ.basetype_name].append(typ.name)
for obj in task.objects:
if str(obj.type_name) not in objects.keys():
objects[str(obj.type_name)] = [str(obj.name)]
else:
objects[str(obj.type_name)].append(str(obj.name))
    # check for hierarchy... should be using a tree for this instead ####
while True:
is_nested, nested_key, parent_key = check_nested(reverse_types)
if is_nested:
reverse_types[parent_key].remove(nested_key)
reverse_types[parent_key].extend(reverse_types[nested_key])
else:
break
####################predTypeMapping######################
for pred_name in predicates.keys():
pred = predicates[pred_name]
args = pred.var_types
nested = False
for i, arg in enumerate(args):
if arg in reverse_types.keys():
args[i] = reverse_types[arg]
nested = True
else:
args[i] = [args[i]]
if nested:
args = itertools.product(*args)
for i, arg_p in enumerate(args):
predTypeMapping[pred_name + '-' + str(i + 1)] = arg_p
else:
predTypeMapping[pred_name] = list(itertools.chain.from_iterable(args))
####################init_state###############################
for item in task.init:
if item.predicate == "=":
continue
temp_pred = copy.deepcopy(item.predicate)
if len(item.args) > 0:
for _arg in item.args:
temp_pred += "|" + _arg
init_state.append(temp_pred)
####################action_parameters########################
action_parameters = {}
action_details = {}
for op_name in operators:
op = operators[op_name]
        op_params = [p.var_type for p in op.params]
nested = False
for i, arg in enumerate(op_params):
if arg in reverse_types.keys():
op_params[i] = reverse_types[arg]
nested = True
heirarchial_types = True
else:
op_params[i] = [op_params[i]]
if nested:
args = itertools.product(*op_params)
for i, arg_p in enumerate(args):
action_parameters[op_name + '-' + str(i + 1)] = arg_p
action_details[op_name + '-' + str(i + 1)] = ActionDetails(op, arg_p, predTypeMapping)
else:
action_parameters[op_name] = list(itertools.chain.from_iterable(op_params))
action_details[op_name] = ActionDetails(op, list(itertools.chain.from_iterable(op_params)), predTypeMapping)
##########################abstract_model#######################
abstract_model = {}
for action in action_parameters.keys():
abstract_model[action] = {}
########################agent_model#################
agent_model = {}
for action_name, action in action_details.items():
agent_model[action_name] = {}
action_params = list(action.param_matching.values())
for pred, pred_params in predTypeMapping.items():
try:
if len(set(pred_params).difference(set(action_params))) == 0:
# check for multiple presence
param_indices = []
[param_indices.append(list(np.where(np.array(action_params) == p))[0].tolist()) for p in
pred_params]
combinations = list(itertools.product(*param_indices))
if len(combinations) > 1:
for c in combinations:
if len(c) != len(set(c)):
continue
agent_model[action_name][pred + '|' + "|".join(map(str, c))] = [Literal.ABS, Literal.ABS]
for l in action.precondition:
if l.name == pred:
action_local_params = [list(action.param_matching.keys())[i] for i in list(c)]
if action_local_params == list(l.param_matching.keys()):
if l.isnegative:
agent_model[action_name][pred + '|' + "|".join(map(str, c))][
0] = Literal.NEG
else:
agent_model[action_name][pred + '|' + "|".join(map(str, c))][
0] = Literal.POS
for l in action.effects:
if l.name == pred:
action_local_params = [list(action.param_matching.keys())[i] for i in list(c)]
if action_local_params == list(l.param_matching.keys()):
if l.isnegative:
agent_model[action_name][pred + '|' + "|".join(map(str, c))][
1] = Literal.NEG
else:
agent_model[action_name][pred + '|' + "|".join(map(str, c))][
1] = Literal.POS
else:
if len(combinations[0]) != len(set(combinations[0])):
continue
str_app = "|".join(map(str, combinations[0]))
if str_app:
modified_pred = pred + '|' + str_app
else:
modified_pred = copy.deepcopy(pred)
agent_model[action_name][modified_pred] = [Literal.ABS, Literal.ABS]
for l in action.precondition:
if l.name == pred:
if l.isnegative:
agent_model[action_name][modified_pred][0] = Literal.NEG
else:
agent_model[action_name][modified_pred][0] = Literal.POS
for l in action.effects:
if l.name == pred:
if l.isnegative:
agent_model[action_name][modified_pred][1] = Literal.NEG
else:
agent_model[action_name][modified_pred][1] = Literal.POS
except KeyError as e:
print("Key Error")
return action_parameters, predTypeMapping, agent_model, abstract_model, objects, reverse_types, init_state, task.domain_name
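# Shape of the agent_model built above (illustrative; names and modes are hypothetical):
#   agent_model["stack"]["on|0|1"] = [Literal.POS, Literal.ABS]
# i.e., for every action, each type-compatible parameter binding of each predicate maps to a
# [precondition, effect] pair of Literal modes extracted from the parsed operators.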
def get_plan(domain_file, problem_file):
"""
This method calls the planner.
The planner can be either FF Planner (ff) or Madagascar (mg).
It needs to be set in config.py in the root directory.
:param domain_file: domain file (operator file) for the planner
:type domain_file: str
:param problem_file: problem file (fact file) for the planner
:type problem_file: str
:rtype: list
"""
plan = ""
if PLANNER == "FF":
result_file = temp_output_file
param = FF_PATH + "ff"
param += " -o " + domain_file
param += " -f " + problem_file
param += " > " + result_file
p = subprocess.Popen([param], shell=True)
p.wait()
plan = FileUtils.get_plan_from_file(result_file)
elif PLANNER == "FD":
cmd = FD_PATH + 'fast-downward.py ' + domain_file + ' ' + problem_file + ' --search "astar(lmcut())"'
plan = os.popen(cmd).read()
proc_plan = plan.split('\n')
cost = [i for i, s in enumerate(proc_plan) if 'Plan cost:' in s]
if 'Solution found!' not in proc_plan:
print("No Solution")
            return []
plan = proc_plan[proc_plan.index('Solution found!') + 2: cost[0] - 1]
return plan
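# Minimal usage sketch (file names hypothetical; PLANNER, FF_PATH and FD_PATH come from the
# package config):
#   plan = get_plan("domain.pddl", "instance-1.pddl")
# Returns the plan as a list of action strings, or an empty list when no solution is found.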
| 13,975 | 39.393064 | 130 | py |
DAAISy | DAAISy-main/src/utils/file_utils.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
from itertools import combinations
from src.config import *
class FileUtils(object):
@classmethod
def get_plan_from_file(cls, result_file):
"""
This method extracts the plan from the output of the planner.
The planner can be either FF Planner (ff) or Madagascar (mg).
It needs to be set in config.py in the root directory.
:param result_file: result file where output of the planner is stored.
:type result_file: str
:return: Plan as a list of action names
:rtype: list of str
"""
plan = []
if PLANNER == "FF":
for line in open(result_file):
if 'STEP' in line:
values = line.split()
if (values[2] != "REACH-GOAL"):
plan.append(("|".join(values[2:])).lower())
elif PLANNER == "FD":
for line in open(result_file):
if ';' not in line:
if line == "\n":
continue
values = line.split("(")
values = values[1].split(")")
plan.append(values[0].rstrip().lower())
return plan
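    # Example of the two planner output formats handled above (hypothetical contents):
    #   FF : "STEP 1: PICK-UP B1"  -> "pick-up|b1"
    #   FD : "(pick-up b1)"        -> "pick-up b1"
    # Lines containing ';' (FD cost comments) and FF's REACH-GOAL step are skipped.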
def writeProblemToFile(ar_query, fd, domain_name, problemName, useDummyPreds, objects, pred_type_mapping):
"""
        This method writes the query problem as a PDDL problem file (using the _1/_2 copies of each predicate).
:param fd: file descriptor of the pddl file in which problem will be written
:type fd: file descriptor
:param domain_name: domain name of the model
:type domain_name: str
:rtype: None
"""
init_state = ar_query.init_state
model = ar_query.model1
fd.write("(define (problem " + problemName + ")\n")
####### Domain #######
fd.write(" (:domain " + domain_name + ")\n")
####### Objects #######
fd.write(" (:objects ")
k = 0
for t, vals in objects.items():
if len(vals) == 0:
# This case happens with domains like logistics
# Here physobj has no actual object
continue
if k > 0:
for k in range(len(" (:objects ")):
fd.write(" ")
for v in vals:
fd.write(v + str(" "))
fd.write(" - " + t + " ")
k += 1
fd.write(")\n")
fd.write(" (:init ")
count = 0
for p, value in init_state.items():
for vals in value:
params = ""
for j in range(len(vals)):
if j > 0:
params += " "
params += vals[j]
if count > 0:
fd.write("\n")
for k in range(len(" (:init ")):
fd.write(" ")
count += 1
if (p in init_state):
fd.write("(" + p + "_1 " + params + ")")
if count > 0:
fd.write("\n")
for k in range(len(" (:init ")):
fd.write(" ")
fd.write("(" + p + "_2 " + params + ")")
else:
fd.write("(not (" + p + "_1 " + params + "))")
if count > 0:
fd.write("\n")
for k in range(len(" (:init ")):
fd.write(" ")
fd.write("(not (" + p + "_2 " + params + "))")
fd.write("\n")
fd.write(" )\n")
####### Goal #######
max_type_count = {}
preds_name = [i.split("|", 1)[0] for i in model.predicates.keys()]
preds_name = list(set(preds_name))
# print(preds_name)
################
# any_object = True when there is at least one object in the domain.
# This helps in avoiding situations where:
# :goal (exists (
# )
# Hence if no object, set any_object to False to avoid printing exists()
################
any_object = False
for t in objects.keys():
max_count = 0
for k, p in pred_type_mapping.items():
if k in preds_name:
max_count = max(max_count, p.count(t))
max_type_count[t] = max_count
if max_count > 0:
any_object = True
if any_object == True:
fd.write(" (:goal (exists (\n")
else:
fd.write(" (:goal \n")
for t in objects.keys():
param = ""
for k in range(len(" (:goal (exists ")):
param += " "
for i in range(max_type_count[t]):
param += " ?" + str(t) + str(i + 1)
param += " - " + str(t) + "\n"
if max_type_count[t] != 0:
fd.write(param)
if any_object == True:
for k in range(len(" (:goal (exists ")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" (:goal (exists ")):
fd.write(" ")
fd.write("(or\n")
preds_covered = []
for k_, val in model.predicates.items():
key = k_.split("|")[0]
if key in preds_covered:
continue
else:
preds_covered.append(key)
# print("key = "+str(key))
for k in range(len(" (:goal (exists (or")):
fd.write(" ")
param = "(and "
param += "(" + key + "_1 "
type_count = {}
if key != 'empty_init':
for v in pred_type_mapping[key]:
if v not in type_count.keys():
type_count[v] = 1
else:
type_count[v] = type_count[v] + 1
param += " ?" + v + str(type_count[v])
param += ")\n"
fd.write(param)
for k in range(len(" (:goal (exists (or (and")):
fd.write(" ")
param = ""
param += "(not (" + key + "_2 "
type_count = {}
if key != 'empty_init':
for v in pred_type_mapping[key]:
if v not in type_count.keys():
type_count[v] = 1
else:
type_count[v] = type_count[v] + 1
param += " ?" + v + str(type_count[v])
param += "))\n"
fd.write(param)
for k in range(len(" (:goal (exists (or")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" (:goal (exists (or")):
fd.write(" ")
param = "(and "
param += "(" + key + "_2 "
type_count = {}
if key != 'empty_init':
for v in pred_type_mapping[key]:
if v not in type_count.keys():
type_count[v] = 1
else:
type_count[v] = type_count[v] + 1
param += " ?" + v + str(type_count[v])
param += ")\n"
fd.write(param)
for k in range(len(" (:goal (exists (or (and")):
fd.write(" ")
param = "(not (" + key + "_1 "
type_count = {}
if key != 'empty_init':
for v in pred_type_mapping[key]:
if v not in type_count.keys():
type_count[v] = 1
else:
type_count[v] = type_count[v] + 1
param += " ?" + v + str(type_count[v])
param += "))\n"
fd.write(param)
for k in range(len(" (:goal (exists (or")):
fd.write(" ")
fd.write(")\n")
if useDummyPreds == True:
for k in range(len(" (:goal (exists (or")):
fd.write(" ")
fd.write("(and (dummy_pred_1)\n")
for k in range(len(" (:goal (exists (or (and")):
fd.write(" ")
fd.write("(not (dummy_pred_2))\n")
for k in range(len(" (:goal (exists (or")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" (:goal (exists (or")):
fd.write(" ")
if any_object == True:
for k in range(len(" (:goal (exists ")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" (:goal ")):
fd.write(" ")
fd.write(")\n")
fd.write(" )\n")
fd.write(")\n")
def writePlanToFile(ar_query, fd, init_state, domain_name, problemName, objects):
"""
    Write the plan stored in ar_query as a PDDL problem file with an (:init ...) and a (:plan ...) section.
:param fd: file descriptor of the pddl file in which plan problem will be written
:type fd: file descriptor
:param domain_name: domain name of the model
:type domain_name: str
:rtype: None
"""
plan = ar_query.plan
fd.write("(define (problem " + problemName + ")\n")
fd.write("(:domain " + domain_name + ")\n")
fd.write("(:init ")
count = 0
for p in init_state:
params = ""
if count > 0:
fd.write("\n")
for k in range(len("(:init ")):
fd.write(" ")
fd.write("(" + p + params + ")")
count += 1
fd.write(")\n\n")
fd.write("(:plan\n")
for k in plan:
for j in range(len("(:plan")):
fd.write(" ")
param = ""
param = " (" + k + ")\n"
fd.write(param)
param = ""
fd.write(")\n")
fd.write(")\n")
def add_unknown_predicate(model1, model2, pal_tuple_dict, pal):
temp_actions_m1 = copy.deepcopy(model1.actions)
for actionName, predicateDict_m1 in temp_actions_m1.items():
if (actionName, pal[1], Location.PRECOND) not in pal_tuple_dict.keys():
# This predicate and action might be incompatible
continue
predicateDict_m1['unknown'] = [Literal.POS, Literal.POS]
if pal_tuple_dict[(actionName, pal[1], Location.PRECOND)] == True:
predicateDict_m1['unknown'][0] = Literal.ABS
if pal_tuple_dict[(actionName, pal[1], Location.EFFECTS)] == True:
predicateDict_m1['unknown'][1] = Literal.NEG
# Remove unknown from current pal tuple's a,l
if pal[2] == Location.PRECOND:
temp_actions_m1[pal[0]]['unknown'][int(pal[2]) - 1] = Literal.ABS
elif pal[2] == Location.EFFECTS:
temp_actions_m1[pal[0]]['unknown'][int(pal[2]) - 1] = Literal.NEG
temp_actions_m2 = copy.deepcopy(model2.actions)
for actionName, predicateDict_m2 in temp_actions_m2.items():
if (actionName, pal[1], Location.PRECOND) not in pal_tuple_dict.keys():
# This predicate and action might be incompatible
continue
predicateDict_m2['unknown'] = [Literal.POS, Literal.POS]
if pal_tuple_dict[(actionName, pal[1], Location.PRECOND)] == True:
predicateDict_m2['unknown'][0] = Literal.ABS
if pal_tuple_dict[(actionName, pal[1], Location.EFFECTS)] == True:
predicateDict_m2['unknown'][1] = Literal.NEG
if pal[2] == Location.PRECOND:
temp_actions_m2[pal[0]]['unknown'][int(pal[2]) - 1] = Literal.ABS
elif pal[2] == Location.EFFECTS:
temp_actions_m2[pal[0]]['unknown'][int(pal[2]) - 1] = Literal.NEG
return temp_actions_m1, temp_actions_m2
def write(var, txt):
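    """Append txt to var and return the result (small string-builder helper used when composing precondition strings)."""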
var += txt
return var
@classmethod
def write_domain_to_file(cls, fd, domain_name, objects, pred_type_mapping, action_parameters, model1, model2,
pal_tuple_dict, pal):
"""
    Write a combined PDDL domain in which every predicate of the model is duplicated
    (suffixes _1 and _2, one copy per candidate model), so that a planner can search
    for a plan that distinguishes the two models.
:param fd: file descriptor of the pddl file in which model will be written
:type fd: file descriptor
:param domain_name: domain name of the model
:type domain_name: str
:rtype: None
"""
use_unknown = True
fd.write("(define (domain " + domain_name + ")\n")
fd.write("(:requirements :strips :typing :conditional-effects :equality :negative-preconditions)\n")
####### Typing #######
fd.write("(:types")
for t in objects.keys():
fd.write(" " + t)
fd.write(")\n")
fd.write("(:predicates ")
count = 0
preds_printed = []
for key, value in model1.predicates.items():
params = ""
cnt = 0
pred_name = key.split("|")[0]
if pred_name != 'empty_init':
for val in pred_type_mapping[pred_name]:
params = params + " ?" + val[0] + str(cnt) + " - " + val
cnt += 1
if count > 0:
fd.write("\n")
for k in range(len("(:predicates ")):
fd.write(" ")
if pred_name not in preds_printed:
preds_printed.append(pred_name)
fd.write("(" + pred_name + "_1 " + params + ")")
fd.write("(" + pred_name + "_2 " + params + ")")
count += 1
fd.write("\n")
if use_unknown:
# ADD UNKNOWN
for k in range(len("(:predicates ")):
fd.write(" ")
fd.write("(unknown_1)")
fd.write("(unknown_2)\n")
for k in range(len("(:predicates ")):
fd.write(" ")
fd.write("(dummy_pred_1)")
fd.write("(dummy_pred_2)")
fd.write(")\n\n")
# Needed to copy because we will add key unknown later.
temp_actions_m1, temp_actions_m2 = FileUtils.add_unknown_predicate(model1, model2, pal_tuple_dict, pal)
for actionName, predicateDict_m1 in temp_actions_m1.items():
head = "(:action " + actionName + "\n" + " :parameters"
fd.write(head)
count = 0
type_count = {}
param_ordering = []
for p in action_parameters[actionName]:
if p not in type_count.keys():
type_count[p] = 1
else:
type_count[p] = type_count[p] + 1
param_ordering.append(p + str(type_count[p]))
fd.write(" (")
head = ""
param_count = len(action_parameters[actionName])
for i in range(param_count):
if i > 0:
for k in range(len(" :parameters (")):
head += " "
head += "?" + param_ordering[i] + " - " + action_parameters[actionName][i] + "\n"
for k in range(len(" :parameters ")):
head += " "
head += ")\n"
fd.write(head)
count = -1
########### Write Precondition ###########
fd.write(" :precondition ")
precond = ""
precond_str = ""
has_something = False
equality_needed = False
if param_count > 1:
equality_needed = True
if equality_needed:
# Ensure none of the parameters are equal to each other
combs = combinations(list(range(0, param_count)), 2)
precond_str = FileUtils.write(precond_str, "(and ")
for c in combs:
has_something = True
precond_str = FileUtils.write(precond_str, "(not (= ")
for j in range(2):
i = c[j]
precond_str = FileUtils.write(precond_str, "?" + param_ordering[i])
if (j == 0):
precond_str = FileUtils.write(precond_str, " ")
else:
precond_str = FileUtils.write(precond_str, ")) ")
precond_str = FileUtils.write(precond_str, "\n")
for k in range(len(" :precondition (and ")):
precond_str = FileUtils.write(precond_str, " ")
# Write precondition of M1 and Precondition of M2 in OR
# This ensures the models are distinguished if only one model
# can execute this action
# precond_str = FileUtils.write(precond_str, "(or \n")
# for k in range(len(" :precondition (and (or ")):
# precond_str = FileUtils.write(precond_str, " ")
if has_something == True:
precond += precond_str
precond_str = ""
# Write predicate 1
head_m1 = ""
not_head_m1 = ""
for predicate, value in predicateDict_m1.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[0] == Literal.AN or t_value[0] == Literal.AP):
t_value[0] = Literal.ABS
elif (t_value[0] == Literal.NP):
t_value[0] = Literal.POS
if (t_value[0] != Literal.ABS) and not (
use_unknown and pal[0] == actionName and pal[1] == predicate):
param = "("
not_param = "("
# has_something = True
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_1"
not_param += pred_name + "_1"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m1 += " "
head_m1 += param + "\n"
not_head_m1 += not_param + "\n"
# for k in range(len(" :precondition (and ")):
# head_m1 += " "
# not_head_m1 += " "
elif (use_unknown and pal[0] == actionName and pal[1] == predicate):
if t_value[0] != Literal.ABS:
# Add +,-,unkn in or
param = "(or ("
not_param = "(or ("
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_1"
not_param += pred_name + "_1"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m1 += " "
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
head_m1 += param + "\n"
not_head_m1 += not_param + "\n"
for k in range(len(" :precondition (and (or (and (or ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or (or ")):
not_head_m1 += " "
head_m1 += "(unknown_1)\n"
not_head_m1 += "(unknown_1)\n"
for k in range(len(" :precondition (and (or (and ")):
head_m1 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m1 += " "
head_m1 += ")\n"
not_head_m1 += ")\n"
head_m2 = ""
not_head_m2 = ""
predicateDict_m2 = temp_actions_m2[actionName]
for predicate, value in predicateDict_m2.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[0] == Literal.AN or t_value[0] == Literal.AP):
t_value[0] = Literal.ABS
elif (t_value[0] == Literal.NP):
t_value[0] = Literal.POS
if (t_value[0] != Literal.ABS) and not (
use_unknown and pal[0] == actionName and pal[1] == predicate):
not_param = "("
param = "("
# has_something = True
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_2"
not_param += pred_name + "_2"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m2 += " "
head_m2 += param + "\n"
not_head_m2 += not_param + "\n"
elif (use_unknown and pal[0] == actionName and pal[1] == predicate):
if t_value[0] != Literal.ABS:
# Add +,-,unkn in or
param = "(or ("
not_param = "(or ("
if (t_value[0] == Literal.NEG):
param += "not ("
if (t_value[0] == Literal.POS):
not_param += "not ("
param += pred_name + "_2"
not_param += pred_name + "_2"
if len(pred_split) > 1:
pred_params = pred_split[1:]
for p in pred_params:
# print(p)
param += " ?" + param_ordering[int(p)]
not_param += " ?" + param_ordering[int(p)]
param += ")"
not_param += ")"
for k in range(len(" :precondition (and (or (and ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m2 += " "
if (t_value[0] != Literal.ABS and t_value[0] != Literal.POS):
param += ")"
if (t_value[0] != Literal.ABS and t_value[0] != Literal.NEG):
not_param += ")"
head_m2 += param + "\n"
not_head_m2 += not_param + "\n"
for k in range(len(" :precondition (and (or (and (or ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or (or ")):
not_head_m2 += " "
head_m2 += "(unknown_2)\n"
not_head_m2 += "(unknown_2)\n"
for k in range(len(" :precondition (and (or (and ")):
head_m2 += " "
for k in range(len(" :precondition (and (or (and (or ")):
not_head_m2 += " "
head_m2 += ")\n"
not_head_m2 += ")\n"
precond += precond_str
if (head_m1 + head_m2).strip() != "":
fd.write(precond + "\n(or \n")
for k in range(len(" :precondition (and (or ")):
fd.write(" ")
if head_m1 != "":
fd.write("(and " + head_m1 + ")")
if head_m2 != "":
fd.write("(and " + head_m2 + ")")
fd.write(")\n")
elif equality_needed == False:
fd.write("()\n")
if equality_needed == True:
fd.write("(" + precond + ")\n")
fd.write(" :effect (and")
# When (prec(m1)) (eff(m1))
fd.write(" (when ")
if (head_m1 + head_m2).strip() != "":
fd.write("(and\n")
if head_m1 != "":
fd.write(head_m1)
if head_m2 != "":
fd.write(head_m2)
for k in range(len(" :effect (and (when ")):
fd.write(" ")
fd.write(")\n")
else:
fd.write("()\n")
for k in range(len(" :effect (and (when ")):
fd.write(" ")
param1 = ""
for predicate, value in predicateDict_m1.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[1] == Literal.AN or t_value[1] == Literal.AP):
t_value[1] = Literal.ABS
elif (t_value[1] == Literal.NP):
t_value[1] = Literal.POS
for k in range(len(" :precondition (and (or (and ")):
param1 += " "
if (t_value[1] != Literal.ABS):
param1 += "("
if (t_value[1] == Literal.NEG):
param1 += "not ("
param1 += pred_name + "_1"
if len(pred_split) > 1:
pred_param1s = pred_split[1:]
for p in pred_param1s:
# print(p)
param1 += " ?" + param_ordering[int(p)]
param1 += ")"
if (t_value[1] != Literal.ABS and t_value[1] != Literal.POS):
param1 += ")"
param2 = ""
for predicate, value in predicateDict_m2.items():
pred_split = predicate.split("|")
pred_name = pred_split[0]
t_value = copy.deepcopy(value)
if (t_value[1] == Literal.AN or t_value[1] == Literal.AP):
t_value[1] = Literal.ABS
elif (t_value[1] == Literal.NP):
t_value[1] = Literal.POS
for k in range(len(" :precondition (and (or (and ")):
param2 += " "
if (t_value[1] != Literal.ABS):
param2 += "("
if (t_value[1] == Literal.NEG):
param2 += "not ("
param2 += pred_name + "_2"
if len(pred_split) > 1:
pred_param2s = pred_split[1:]
for p in pred_param2s:
# print(p)
param2 += " ?" + param_ordering[int(p)]
param2 += ")"
if (t_value[1] != Literal.ABS and t_value[1] != Literal.POS):
param2 += ")"
if (param1 + param2).strip() != "":
fd.write("(and\n")
if param1 != "":
fd.write(param1)
if param2 != "":
fd.write(param2)
for k in range(len(" :effect (and (when ")):
fd.write(" ")
fd.write(")\n")
else:
fd.write("()\n")
fd.write(")\n")
for k in range(len(" :effect (and ")):
fd.write(" ")
fd.write("(when ")
# When (or (!(prec(m1))) (!(prec(m2)))) (create dummy diff)
if (head_m1 + head_m2 + not_head_m1 + not_head_m2).strip() != "":
fd.write("(or \n")
for k in range(len(" :effect (and (when (or ")):
fd.write(" ")
if (head_m1 + not_head_m2).strip() != "":
fd.write("(and \n" + head_m1)
for k in range(len(" :effect (and (when (or (and ")):
fd.write(" ")
if not_head_m2.strip() != "":
fd.write("(or \n" + not_head_m2)
for k in range(len(" :effect (and (when (or (and ")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" :effect (and (when (or ")):
fd.write(" ")
fd.write(")\n")
if (head_m2 + not_head_m1).strip() != "":
for k in range(len(" :effect (and (when (or ")):
fd.write(" ")
fd.write("(and \n" + head_m2)
for k in range(len(" :effect (and (when (or (and ")):
fd.write(" ")
if not_head_m1.strip() != "":
fd.write("(or \n" + not_head_m1)
for k in range(len(" :effect (and (when (or (and ")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" :effect (and (when (or ")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" :effect (and (when ")):
fd.write(" ")
fd.write(")\n")
else:
fd.write("()\n")
for k in range(len(" :effect (and (when ")):
fd.write(" ")
fd.write("(and \n")
for k in range(len(" :effect (and (when (and ")):
fd.write(" ")
fd.write("(dummy_pred_1)\n")
for k in range(len(" :effect (and (when (and ")):
fd.write(" ")
fd.write("(not(dummy_pred_2))\n")
for k in range(len(" :effect (and (when ")):
fd.write(" ")
fd.write(") \n")
for k in range(len(" :effect (and ")):
fd.write(" ")
fd.write(")\n")
for k in range(len(" :effect ")):
fd.write(" ")
fd.write(")\n")
fd.write(")\n\n")
fd.write(")\n")
| 32,456 | 38.533496 | 113 | py |
DAAISy | DAAISy-main/src/utils/__init__.py | from .file_utils import FileUtils
from .helpers import get_model_difference
from .helpers import get_next_predicate
from .helpers import instantiate_pred_with_action
from .helpers import map_pred_action_param
from .helpers import set_to_state
from .helpers import state_to_set
from .helpers import union_states
from .parse_files import ActionDetails
from .parse_files import PredicateDetails
from .parse_files import check_nested
from .parse_files import extract_task
from .parse_files import generate_ds
from .parse_files import get_plan
| 539 | 35 | 49 | py |
DAAISy | DAAISy-main/src/utils/helpers.py | #!/usr/local/bin/python3
# encoding: utf-8
import copy
from src.config import *
def state_to_set(state):
"""
    Convert a state dict into its set representation, e.g. {'above|f0|f1', 'lift_at|f0'}.
"""
state_set = []
for p, v in state.items():
if isinstance(v, (list, tuple)) and len(v) > 0:
for i in range(0, len(v)):
items = v[i]
t_init_state = p
if isinstance(items, str):
t_init_state += "|" + items
else:
for _i in items:
t_init_state += "|" + _i
state_set.append(t_init_state)
else:
state_set.append(p)
return set(state_set)
def set_to_state(state_set):
"""
    Convert a set representation back into a state dict, e.g. {'destin': [(...)], 'lift_at': [(...), (...)]}.
"""
state = {}
for p in state_set:
pred_params = (p.split("|")[1:])
pred_name = p.split("|")[0]
if pred_name in state.keys():
state[pred_name].append(tuple(pred_params))
else:
state[pred_name] = [tuple(pred_params), ]
return state
def union_states(state1, state2):
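    """Return the union of two state dicts, merging the grounding tuples listed for each predicate."""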
joint_state = copy.deepcopy(state1)
for key, val in state2.items():
if key not in joint_state.keys():
joint_state[key] = val
else:
for v in val:
if v not in joint_state[key]:
joint_state[key].append(v)
return joint_state
def map_pred_action_param(pred, action):
"""
:param pred: Pred in format ontable|c
:param action: Action in format pickup|c
:return: pred in format ontable|0
"""
action_name = action.split("|")[0]
action_params = action.split('|')[1:]
pred_params = pred.split("|")[1:]
pred_name = pred.split("|")[0]
for param in pred_params:
if param in action_params:
indx = action_params.index(param)
if indx != -1:
pred_name += "|" + str(indx)
else:
return None, None
if pred.count("|") != pred_name.count("|"):
return None, None
return action_name, pred_name
def instantiate_pred_with_action(pred, action):
"""
:param pred: Pred in format ontable|0
:param action: Action in format pickup|c
:return: pred in format ontable|c
"""
action_params = action.split('|')[1:]
pred_params = pred.split("|")[1:]
pred_name = pred.split("|")[0]
for param in pred_params:
try:
if int(param) < len(action_params):
pred_name += "|" + str(action_params[int(param)])
else:
return None
except IndexError:
return None
except TypeError:
return None
return pred_name
def get_model_difference(model1, model2, pal_tuple_dict):
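    """
    Return the fraction of (action, predicate, location) tuples that are either not
    yet learned (their pal_tuple_dict entry is False) or differ between model1 and model2.
    """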
# model1 is agent model
diff = 0
diff_set = set()
for action in model1.actions:
for pred in model1.actions[action].keys():
for loc in [Location.PRECOND, Location.EFFECTS]:
try:
if not pal_tuple_dict[(action, pred, loc)]:
# print("Not learnt PALM: ", action, pred, loc)
diff_set.add((action, pred, loc))
diff += 1
                    elif model1.actions[action][pred][loc - 1] != model2.actions[action][pred][loc - 1]:  # Location.PRECOND == 1, so loc - 1 gives the correct list index
diff += 1
# print("Incorrect PALM: ", action, pred, loc)
diff_set.add((action, pred, loc))
# print("In Agent: ", model1.actions[action][pred][loc - 1])
# print("In Model: ", model2.actions[action][pred][loc - 1])
# print("")
                except:
                    # Most likely a missing pal_tuple_dict or model entry for this (action, pred, loc) combination
                    print("Could not compare:", (action, pred, loc))
print("\n Incorrect PALM:")
for tup in diff_set:
print(tup)
return diff / len(pal_tuple_dict)
def get_next_predicate(all_predicates, abs_predicates):
new_preds = set(all_predicates) - set(abs_predicates)
if len(new_preds) == 0:
return None
else:
return new_preds.pop()
| 4,227 | 29.637681 | 173 | py |
DAAISy | DAAISy-main/src/utils/translate/pddl_to_prolog.py | #! /usr/bin/env python3
import itertools
from . import normalize
from . import pddl_fd as pddl
from . import timers
class PrologProgram:
def __init__(self):
self.facts = []
self.rules = []
self.objects = set()
def predicate_name_generator():
for count in itertools.count():
yield "p$%d" % count
self.new_name = predicate_name_generator()
def add_fact(self, atom):
self.facts.append(Fact(atom))
self.objects |= set(atom.args)
def add_rule(self, rule):
self.rules.append(rule)
def dump(self, file=None):
for fact in self.facts:
print(fact, file=file)
for rule in self.rules:
print(getattr(rule, "type", "none"), rule, file=file)
def normalize(self):
# Normalized prolog programs have the following properties:
# 1. Each variable that occurs in the effect of a rule also occurs in its
# condition.
# 2. The variables that appear in each effect or condition are distinct.
# 3. There are no rules with empty condition.
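        # Hedged illustration of the steps below: a rule "p(X, Y) :- q(X)." gains an
        # @object(Y) condition, "p(X, Y, X) :- ..." becomes "p(X, Y, X@0)" plus
        # "=(X, X@0)", and rules left with no conditions are turned into facts.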
self.remove_free_effect_variables()
self.split_duplicate_arguments()
self.convert_trivial_rules()
def split_rules(self):
        from . import split_rules
# Splits rules whose conditions can be partitioned in such a way that
# the parts have disjoint variable sets, then split n-ary joins into
# a number of binary joins, introducing new pseudo-predicates for the
# intermediate values.
new_rules = []
for rule in self.rules:
new_rules += split_rules.split_rule(rule, self.new_name)
self.rules = new_rules
def remove_free_effect_variables(self):
"""Remove free effect variables like the variable Y in the rule
p(X, Y) :- q(X). This is done by introducing a new predicate
@object, setting it true for all objects, and translating the above
rule to p(X, Y) :- q(X), @object(Y).
After calling this, no new objects should be introduced!"""
# Note: This should never be necessary for typed domains.
# Leaving it in at the moment regardless.
must_add_predicate = False
for rule in self.rules:
eff_vars = get_variables([rule.effect])
cond_vars = get_variables(rule.conditions)
if not eff_vars.issubset(cond_vars):
must_add_predicate = True
eff_vars -= cond_vars
for var in sorted(eff_vars):
rule.add_condition(pddl.Atom("@object", [var]))
if must_add_predicate:
print("Unbound effect variables: Adding @object predicate.")
self.facts += [Fact(pddl.Atom("@object", [obj])) for obj in self.objects]
def split_duplicate_arguments(self):
"""Make sure that no variable occurs twice within the same symbolic fact,
like the variable X does in p(X, Y, X). This is done by renaming the second
and following occurrences of the variable and adding equality conditions.
For example p(X, Y, X) is translated to p(X, Y, X@0) with the additional
condition =(X, X@0); the equality predicate must be appropriately instantiated
somewhere else."""
printed_message = False
for rule in self.rules:
if rule.rename_duplicate_variables() and not printed_message:
print("Duplicate arguments: Adding equality conditions.")
printed_message = True
def convert_trivial_rules(self):
"""Convert rules with an empty condition into facts.
This must be called after bounding rule effects, so that rules with an
empty condition must necessarily have a variable-free effect.
Variable-free effects are the only ones for which a distinction between
ground and symbolic atoms is not necessary."""
must_delete_rules = []
for i, rule in enumerate(self.rules):
if not rule.conditions:
assert not get_variables([rule.effect])
self.add_fact(pddl.Atom(rule.effect.predicate, rule.effect.args))
must_delete_rules.append(i)
if must_delete_rules:
print("Trivial rules: Converted to facts.")
for rule_no in must_delete_rules[::-1]:
del self.rules[rule_no]
def get_variables(symbolic_atoms):
variables = set()
for sym_atom in symbolic_atoms:
variables |= {arg for arg in sym_atom.args if arg[0] == "?"}
return variables
class Fact:
def __init__(self, atom):
self.atom = atom
def __str__(self):
return "%s." % self.atom
class Rule:
def __init__(self, conditions, effect):
self.conditions = conditions
self.effect = effect
def add_condition(self, condition):
self.conditions.append(condition)
def get_variables(self):
return get_variables(self.conditions + [self.effect])
def _rename_duplicate_variables(self, atom, new_conditions):
used_variables = set()
for i, var_name in enumerate(atom.args):
if var_name[0] == "?":
if var_name in used_variables:
new_var_name = "%s@%d" % (var_name, len(new_conditions))
atom = atom.replace_argument(i, new_var_name)
new_conditions.append(pddl.Atom("=", [var_name, new_var_name]))
else:
used_variables.add(var_name)
return atom
def rename_duplicate_variables(self):
extra_conditions = []
self.effect = self._rename_duplicate_variables(
self.effect, extra_conditions)
old_conditions = self.conditions
self.conditions = []
for condition in old_conditions:
self.conditions.append(self._rename_duplicate_variables(
condition, extra_conditions))
self.conditions += extra_conditions
return bool(extra_conditions)
def __str__(self):
cond_str = ", ".join(map(str, self.conditions))
return "%s :- %s." % (self.effect, cond_str)
def translate_typed_object(prog, obj, type_dict):
supertypes = type_dict[obj.type_name].supertype_names
for type_name in [obj.type_name] + supertypes:
prog.add_fact(pddl.TypedObject(obj.name, type_name).get_atom())
def translate_facts(prog, task):
type_dict = {type.name: type for type in task.types}
for obj in task.objects:
translate_typed_object(prog, obj, type_dict)
for fact in task.init:
assert isinstance(fact, pddl.Atom) or isinstance(fact, pddl.Assign)
if isinstance(fact, pddl.Atom):
prog.add_fact(fact)
def translate(task):
# Note: The function requires that the task has been normalized.
with timers.timing("Generating Datalog program"):
prog = PrologProgram()
translate_facts(prog, task)
for conditions, effect in normalize.build_exploration_rules(task):
prog.add_rule(Rule(conditions, effect))
with timers.timing("Normalizing Datalog program", block=True):
# Using block=True because normalization can output some messages
# in rare cases.
prog.normalize()
prog.split_rules()
return prog
if __name__ == "__main__":
import pddl_parser
task = pddl_parser.open()
normalize.normalize(task)
prog = translate(task)
prog.dump()
| 7,487 | 35.8867 | 86 | py |
DAAISy | DAAISy-main/src/utils/translate/build_model.py | #! /usr/bin/env python3
import itertools
import sys
from functools import reduce
from . import pddl_fd as pddl
from . import timers
def convert_rules(prog):
RULE_TYPES = {
"join": JoinRule,
"product": ProductRule,
"project": ProjectRule,
}
result = []
for rule in prog.rules:
RuleType = RULE_TYPES[rule.type]
new_effect, new_conditions = variables_to_numbers(
rule.effect, rule.conditions)
rule = RuleType(new_effect, new_conditions)
rule.validate()
result.append(rule)
return result
def variables_to_numbers(effect, conditions):
new_effect_args = list(effect.args)
rename_map = {}
for i, arg in enumerate(effect.args):
if arg[0] == "?":
rename_map[arg] = i
new_effect_args[i] = i
new_effect = pddl.Atom(effect.predicate, new_effect_args)
# There are three possibilities for arguments in conditions:
# 1. They are variables that occur in the effect. In that case,
# they are replaced by the corresponding position in the
# effect, as indicated by the rename_map.
# 2. They are constants. In that case, the unifier must guarantee
# that they are matched appropriately. In that case, they are
# not modified (remain strings denoting objects).
# 3. They are variables that don't occur in the effect (are
# projected away). This is only allowed in projection rules.
# Such arguments are also not modified (remain "?x" strings).
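    # Hypothetical example: with effect p(?x, ?y) and condition q(?y, ?x, someobj),
    # the effect becomes p(0, 1) and the condition becomes q(1, 0, someobj).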
new_conditions = []
for cond in conditions:
new_cond_args = [rename_map.get(arg, arg) for arg in cond.args]
new_conditions.append(pddl.Atom(cond.predicate, new_cond_args))
return new_effect, new_conditions
class BuildRule:
def prepare_effect(self, new_atom, cond_index):
effect_args = list(self.effect.args)
cond = self.conditions[cond_index]
for var_no, obj in zip(cond.args, new_atom.args):
if isinstance(var_no, int):
effect_args[var_no] = obj
return effect_args
def __str__(self):
return "%s :- %s" % (self.effect, ", ".join(map(str, self.conditions)))
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self)
class JoinRule(BuildRule):
def __init__(self, effect, conditions):
self.effect = effect
self.conditions = conditions
left_args = conditions[0].args
right_args = conditions[1].args
left_vars = {var for var in left_args if isinstance(var, int)}
right_vars = {var for var in right_args if isinstance(var, int)}
common_vars = sorted(left_vars & right_vars)
self.common_var_positions = [
[args.index(var) for var in common_vars]
for args in (list(left_args), list(right_args))]
self.atoms_by_key = ({}, {})
def validate(self):
assert len(self.conditions) == 2, self
left_args = self.conditions[0].args
right_args = self.conditions[1].args
eff_args = self.effect.args
left_vars = {v for v in left_args
if isinstance(v, int) or v[0] == "?"}
right_vars = {v for v in right_args
if isinstance(v, int) or v[0] == "?"}
eff_vars = {v for v in eff_args
if isinstance(v, int) or v[0] == "?"}
assert left_vars & right_vars, self
assert (left_vars | right_vars) == (left_vars & right_vars) | eff_vars, self
def update_index(self, new_atom, cond_index):
ordered_common_args = [
new_atom.args[position]
for position in self.common_var_positions[cond_index]]
key = tuple(ordered_common_args)
self.atoms_by_key[cond_index].setdefault(key, []).append(new_atom)
def fire(self, new_atom, cond_index, enqueue_func):
effect_args = self.prepare_effect(new_atom, cond_index)
ordered_common_args = [
new_atom.args[position]
for position in self.common_var_positions[cond_index]]
key = tuple(ordered_common_args)
other_cond_index = 1 - cond_index
other_cond = self.conditions[other_cond_index]
for atom in self.atoms_by_key[other_cond_index].get(key, []):
for var_no, obj in zip(other_cond.args, atom.args):
if isinstance(var_no, int):
effect_args[var_no] = obj
enqueue_func(self.effect.predicate, effect_args)
class ProductRule(BuildRule):
def __init__(self, effect, conditions):
self.effect = effect
self.conditions = conditions
self.atoms_by_index = [[] for c in self.conditions]
self.empty_atom_list_no = len(self.conditions)
def validate(self):
assert len(self.conditions) >= 2, self
cond_vars = [{v for v in cond.args
if isinstance(v, int) or v[0] == "?"}
for cond in self.conditions]
all_cond_vars = reduce(set.union, cond_vars)
eff_vars = {v for v in self.effect.args
if isinstance(v, int) or v[0] == "?"}
assert len(all_cond_vars) == len(eff_vars), self
assert len(all_cond_vars) == sum([len(c) for c in cond_vars])
def update_index(self, new_atom, cond_index):
atom_list = self.atoms_by_index[cond_index]
if not atom_list:
self.empty_atom_list_no -= 1
atom_list.append(new_atom)
def _get_bindings(self, atom, cond):
return [(var_no, obj) for var_no, obj in zip(cond.args, atom.args)
if isinstance(var_no, int)]
def fire(self, new_atom, cond_index, enqueue_func):
if self.empty_atom_list_no:
return
# Binding: a (var_no, object) pair
# Bindings: List-of(Binding)
# BindingsFactor: List-of(Bindings)
# BindingsFactors: List-of(BindingsFactor)
bindings_factors = []
for pos, cond in enumerate(self.conditions):
if pos == cond_index:
continue
atoms = self.atoms_by_index[pos]
assert atoms, "if we have no atoms, this should never be called"
factor = [self._get_bindings(atom, cond) for atom in atoms]
bindings_factors.append(factor)
eff_args = self.prepare_effect(new_atom, cond_index)
for bindings_list in itertools.product(*bindings_factors):
bindings = itertools.chain(*bindings_list)
for var_no, obj in bindings:
eff_args[var_no] = obj
enqueue_func(self.effect.predicate, eff_args)
class ProjectRule(BuildRule):
def __init__(self, effect, conditions):
self.effect = effect
self.conditions = conditions
def validate(self):
assert len(self.conditions) == 1
def update_index(self, new_atom, cond_index):
pass
def fire(self, new_atom, cond_index, enqueue_func):
effect_args = self.prepare_effect(new_atom, cond_index)
enqueue_func(self.effect.predicate, effect_args)
class Unifier:
def __init__(self, rules):
self.predicate_to_rule_generator = {}
for rule in rules:
for i, cond in enumerate(rule.conditions):
self._insert_condition(rule, i)
def unify(self, atom):
result = []
generator = self.predicate_to_rule_generator.get(atom.predicate)
if generator:
generator.generate(atom, result)
return result
def _insert_condition(self, rule, cond_index):
condition = rule.conditions[cond_index]
root = self.predicate_to_rule_generator.get(condition.predicate)
if not root:
root = LeafGenerator()
constant_arguments = [
(arg_index, arg)
for (arg_index, arg) in enumerate(condition.args)
if not isinstance(arg, int) and arg[0] != "?"]
newroot = root._insert(constant_arguments, (rule, cond_index))
self.predicate_to_rule_generator[condition.predicate] = newroot
def dump(self):
predicates = sorted(self.predicate_to_rule_generator)
print("Unifier:")
for pred in predicates:
print(" %s:" % pred)
rule_gen = self.predicate_to_rule_generator[pred]
rule_gen.dump(" " * 2)
class LeafGenerator:
index = sys.maxsize
def __init__(self):
self.matches = []
def empty(self):
return not self.matches
def generate(self, atom, result):
result += self.matches
def _insert(self, args, value):
if not args:
self.matches.append(value)
return self
else:
root = LeafGenerator()
root.matches.append(value)
for arg_index, arg in args[::-1]:
new_root = MatchGenerator(arg_index, LeafGenerator())
new_root.match_generator[arg] = root
root = new_root
root.matches = self.matches # can be swapped in C++
return root
def dump(self, indent):
for match in self.matches:
print("%s%s" % (indent, match))
class MatchGenerator:
def __init__(self, index, next):
self.index = index
self.matches = []
self.match_generator = {}
self.next = next
def empty(self):
return False
def generate(self, atom, result):
result += self.matches
generator = self.match_generator.get(atom.args[self.index])
if generator:
generator.generate(atom, result)
self.next.generate(atom, result)
def _insert(self, args, value):
if not args:
self.matches.append(value)
return self
else:
arg_index, arg = args[0]
if self.index < arg_index:
self.next = self.next._insert(args, value)
return self
elif self.index > arg_index:
new_parent = MatchGenerator(arg_index, self)
new_branch = LeafGenerator()._insert(args[1:], value)
new_parent.match_generator[arg] = new_branch
return new_parent
else:
branch_generator = self.match_generator.get(arg)
if not branch_generator:
branch_generator = LeafGenerator()
self.match_generator[arg] = branch_generator._insert(
args[1:], value)
return self
def dump(self, indent):
for match in self.matches:
print("%s%s" % (indent, match))
for key in sorted(self.match_generator.keys()):
print("%sargs[%s] == %s:" % (indent, self.index, key))
self.match_generator[key].dump(indent + " ")
if not self.next.empty():
assert isinstance(self.next, MatchGenerator)
print("%s[*]" % indent)
self.next.dump(indent + " ")
class Queue:
def __init__(self, atoms):
self.queue = atoms
self.queue_pos = 0
self.enqueued = {(atom.predicate,) + tuple(atom.args)
for atom in self.queue}
self.num_pushes = len(atoms)
def __bool__(self):
return self.queue_pos < len(self.queue)
__nonzero__ = __bool__
def push(self, predicate, args):
self.num_pushes += 1
eff_tuple = (predicate,) + tuple(args)
if eff_tuple not in self.enqueued:
self.enqueued.add(eff_tuple)
self.queue.append(pddl.Atom(predicate, list(args)))
def pop(self):
result = self.queue[self.queue_pos]
self.queue_pos += 1
return result
def compute_model(prog):
with timers.timing("Preparing model"):
rules = convert_rules(prog)
unifier = Unifier(rules)
# unifier.dump()
fact_atoms = sorted(fact.atom for fact in prog.facts)
queue = Queue(fact_atoms)
print("Generated %d rules." % len(rules))
with timers.timing("Computing model"):
relevant_atoms = 0
auxiliary_atoms = 0
while queue:
next_atom = queue.pop()
pred = next_atom.predicate
if isinstance(pred, str) and "$" in pred:
auxiliary_atoms += 1
else:
relevant_atoms += 1
matches = unifier.unify(next_atom)
for rule, cond_index in matches:
rule.update_index(next_atom, cond_index)
rule.fire(next_atom, cond_index, queue.push)
print("%d relevant atoms" % relevant_atoms)
print("%d auxiliary atoms" % auxiliary_atoms)
print("%d final queue length" % len(queue.queue))
print("%d total queue pushes" % queue.num_pushes)
return queue.queue
if __name__ == "__main__":
import pddl_parser
import normalize
import pddl_to_prolog
print("Parsing...")
task = pddl_parser.open()
print("Normalizing...")
normalize.normalize(task)
print("Writing rules...")
prog = pddl_to_prolog.translate(task)
model = compute_model(prog)
for atom in model:
print(atom)
print("%d atoms" % len(model))
| 13,173 | 33.577428 | 84 | py |
DAAISy | DAAISy-main/src/utils/translate/invariant_finder.py | #! /usr/bin/env python3
import itertools
import time
from collections import deque, defaultdict
from . import invariants
from . import options
from . import pddl_fd as pddl
from . import timers
class BalanceChecker:
def __init__(self, task, reachable_action_params):
self.predicates_to_add_actions = defaultdict(set)
self.action_to_heavy_action = {}
for act in task.actions:
action = self.add_inequality_preconds(act, reachable_action_params)
too_heavy_effects = []
create_heavy_act = False
heavy_act = action
for eff in action.effects:
too_heavy_effects.append(eff)
if eff.parameters: # universal effect
create_heavy_act = True
too_heavy_effects.append(eff.copy())
if not eff.literal.negated:
predicate = eff.literal.predicate
self.predicates_to_add_actions[predicate].add(action)
if create_heavy_act:
heavy_act = pddl.Action(action.name, action.parameters,
action.num_external_parameters,
action.precondition, too_heavy_effects,
action.cost)
# heavy_act: duplicated universal effects and assigned unique names
# to all quantified variables (implicitly in constructor)
self.action_to_heavy_action[action] = heavy_act
def get_threats(self, predicate):
return self.predicates_to_add_actions.get(predicate, set())
def get_heavy_action(self, action):
return self.action_to_heavy_action[action]
def add_inequality_preconds(self, action, reachable_action_params):
if reachable_action_params is None or len(action.parameters) < 2:
return action
inequal_params = []
combs = itertools.combinations(range(len(action.parameters)), 2)
for pos1, pos2 in combs:
for params in reachable_action_params[action]:
if params[pos1] == params[pos2]:
break
else:
inequal_params.append((pos1, pos2))
if inequal_params:
precond_parts = [action.precondition]
for pos1, pos2 in inequal_params:
param1 = action.parameters[pos1].name
param2 = action.parameters[pos2].name
new_cond = pddl.NegatedAtom("=", (param1, param2))
precond_parts.append(new_cond)
precond = pddl.Conjunction(precond_parts).simplified()
return pddl.Action(
action.name, action.parameters, action.num_external_parameters,
precond, action.effects, action.cost)
else:
return action
def get_fluents(task):
fluent_names = set()
for action in task.actions:
for eff in action.effects:
fluent_names.add(eff.literal.predicate)
return [pred for pred in task.predicates if pred.name in fluent_names]
def get_initial_invariants(task):
for predicate in get_fluents(task):
all_args = list(range(len(predicate.arguments)))
for omitted_arg in [-1] + all_args:
order = [i for i in all_args if i != omitted_arg]
part = invariants.InvariantPart(predicate.name, order, omitted_arg)
yield invariants.Invariant((part,))
def find_invariants(task, reachable_action_params):
limit = options.invariant_generation_max_candidates
candidates = deque(itertools.islice(get_initial_invariants(task), 0, limit))
print(len(candidates), "initial candidates")
seen_candidates = set(candidates)
balance_checker = BalanceChecker(task, reachable_action_params)
def enqueue_func(invariant):
if len(seen_candidates) < limit and invariant not in seen_candidates:
candidates.append(invariant)
seen_candidates.add(invariant)
start_time = time.process_time()
while candidates:
candidate = candidates.popleft()
if time.process_time() - start_time > options.invariant_generation_max_time:
print("Time limit reached, aborting invariant generation")
return
if candidate.check_balance(balance_checker, enqueue_func):
yield candidate
def useful_groups(invariants, initial_facts):
predicate_to_invariants = defaultdict(list)
for invariant in invariants:
for predicate in invariant.predicates:
predicate_to_invariants[predicate].append(invariant)
nonempty_groups = set()
overcrowded_groups = set()
for atom in initial_facts:
if isinstance(atom, pddl.Assign):
continue
for invariant in predicate_to_invariants.get(atom.predicate, ()):
group_key = (invariant, tuple(invariant.get_parameters(atom)))
if group_key not in nonempty_groups:
nonempty_groups.add(group_key)
else:
overcrowded_groups.add(group_key)
useful_groups = nonempty_groups - overcrowded_groups
for (invariant, parameters) in useful_groups:
yield [part.instantiate(parameters) for part in sorted(invariant.parts)]
def get_groups(task, reachable_action_params=None):
with timers.timing("Finding invariants", block=True):
invariants = sorted(find_invariants(task, reachable_action_params))
with timers.timing("Checking invariant weight"):
result = list(useful_groups(invariants, task.init))
return result
if __name__ == "__main__":
import normalize
import pddl_parser
print("Parsing...")
task = pddl_parser.open()
print("Normalizing...")
normalize.normalize(task)
print("Finding invariants...")
print("NOTE: not passing in reachable_action_params.")
print("This means fewer invariants might be found.")
for invariant in find_invariants(task, None):
print(invariant)
print("Finding fact groups...")
groups = get_groups(task)
for group in groups:
print("[%s]" % ", ".join(map(str, group)))
| 6,147 | 37.425 | 84 | py |
DAAISy | DAAISy-main/src/utils/translate/split_rules.py | # split_rules: Split rules whose conditions fall into different "connected
# components" (where to conditions are related if they share a variabe) into
# several rules, one for each connected component and one high-level rule.
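# Hedged example: a rule whose two conditions share no variable, say
# "goal :- at(?p, ?l), clear(?x)", is split into one projected rule per
# component plus a combining rule (type "product") over the projected effects.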
from . import graph
from . import greedy_join
from . import pddl_fd as pddl
from .pddl_to_prolog import Rule, get_variables
def get_connected_conditions(conditions):
agraph = graph.Graph(conditions)
var_to_conditions = {var: [] for var in get_variables(conditions)}
for cond in conditions:
for var in cond.args:
if var[0] == "?":
var_to_conditions[var].append(cond)
# Connect conditions with a common variable
for var, conds in var_to_conditions.items():
for cond in conds[1:]:
agraph.connect(conds[0], cond)
return sorted(map(sorted, agraph.connected_components()))
def project_rule(rule, conditions, name_generator):
predicate = next(name_generator)
effect_variables = set(rule.effect.args) & get_variables(conditions)
effect = pddl.Atom(predicate, sorted(effect_variables))
projected_rule = Rule(conditions, effect)
return projected_rule
def split_rule(rule, name_generator):
important_conditions, trivial_conditions = [], []
for cond in rule.conditions:
for arg in cond.args:
if arg[0] == "?":
important_conditions.append(cond)
break
else:
trivial_conditions.append(cond)
# important_conditions = [cond for cond in rule.conditions if cond.args]
# trivial_conditions = [cond for cond in rule.conditions if not cond.args]
components = get_connected_conditions(important_conditions)
if len(components) == 1 and not trivial_conditions:
return split_into_binary_rules(rule, name_generator)
projected_rules = [project_rule(rule, conditions, name_generator)
for conditions in components]
result = []
for proj_rule in projected_rules:
result += split_into_binary_rules(proj_rule, name_generator)
conditions = ([proj_rule.effect for proj_rule in projected_rules] +
trivial_conditions)
combining_rule = Rule(conditions, rule.effect)
if len(conditions) >= 2:
combining_rule.type = "product"
else:
combining_rule.type = "project"
result.append(combining_rule)
return result
def split_into_binary_rules(rule, name_generator):
if len(rule.conditions) <= 1:
rule.type = "project"
return [rule]
return greedy_join.greedy_join(rule, name_generator)
| 2,605 | 34.69863 | 78 | py |
DAAISy | DAAISy-main/src/utils/translate/variable_order.py | import heapq
from collections import defaultdict, deque
from itertools import chain
from . import sccs
DEBUG = False
class CausalGraph:
"""Weighted causal graph used for defining a variable order.
The causal graph only contains pre->eff edges (in contrast to the
variant that also has eff<->eff edges).
The variable order is defined such that removing all edges v->v'
with v>v' induces an acyclic subgraph of the causal graph. This
corresponds to the pruning of the causal graph as described in the
JAIR 2006 Fast Downward paper for the causal graph heuristic. The
greedy method is based on weighting the edges of the causal graph.
In this implementation these weights slightly differ from the
description in the JAIR paper to reproduce the behaviour of the
original implementation in the preprocessor component of the
planner.
"""
def __init__(self, sas_task):
self.weighted_graph = defaultdict(lambda: defaultdict(int))
## var_no -> (var_no -> number)
self.predecessor_graph = defaultdict(set)
self.ordering = []
self.weight_graph_from_ops(sas_task.operators)
self.weight_graph_from_axioms(sas_task.axioms)
self.num_variables = len(sas_task.variables.ranges)
self.goal_map = dict(sas_task.goal.pairs)
def get_ordering(self):
if not self.ordering:
sccs = self.get_strongly_connected_components()
self.calculate_topological_pseudo_sort(sccs)
return self.ordering
def weight_graph_from_ops(self, operators):
### A source variable can be processed several times. This was
### probably not intended originally but in experiments (cf.
### issue26) it performed better than the (clearer) weighting
### described in the Fast Downward paper (which would require
### a more complicated implementation).
for op in operators:
source_vars = [var for (var, value) in op.prevail]
for var, pre, _, _ in op.pre_post:
if pre != -1:
source_vars.append(var)
for target, _, _, cond in op.pre_post:
for source in chain(source_vars, (var for var, _ in cond)):
if source != target:
self.weighted_graph[source][target] += 1
self.predecessor_graph[target].add(source)
def weight_graph_from_axioms(self, axioms):
for ax in axioms:
target = ax.effect[0]
for source, _ in ax.condition:
if source != target:
self.weighted_graph[source][target] += 1
self.predecessor_graph[target].add(source)
def get_strongly_connected_components(self):
unweighted_graph = [[] for _ in range(self.num_variables)]
assert (len(self.weighted_graph) <= self.num_variables)
for source, target_weights in self.weighted_graph.items():
unweighted_graph[source] = sorted(target_weights.keys())
return sccs.get_sccs_adjacency_list(unweighted_graph)
def calculate_topological_pseudo_sort(self, sccs):
for scc in sccs:
if len(scc) > 1:
# component needs to be turned into acyclic subgraph
# Compute subgraph induced by scc
subgraph = defaultdict(list)
for var in scc:
# for each variable in component only list edges inside
# component.
subgraph_edges = subgraph[var]
for target, cost in sorted(self.weighted_graph[var].items()):
if target in scc:
if target in self.goal_map:
subgraph_edges.append((target, 100000 + cost))
subgraph_edges.append((target, cost))
self.ordering.extend(MaxDAG(subgraph, scc).get_result())
else:
self.ordering.append(scc[0])
def calculate_important_vars(self, goal):
# Note for future refactoring: it is perhaps more idiomatic
# and efficient to use a set rather than a defaultdict(bool).
necessary = defaultdict(bool)
for var, _ in goal.pairs:
if not necessary[var]:
necessary[var] = True
self.dfs(var, necessary)
return necessary
def dfs(self, node, necessary):
stack = [pred for pred in self.predecessor_graph[node]]
while stack:
n = stack.pop()
if not necessary[n]:
necessary[n] = True
stack.extend(pred for pred in self.predecessor_graph[n])
class MaxDAG:
"""Defines a variable ordering for a SCC of the (weighted) causal
graph.
Conceptually, the greedy algorithm successively picks a node with
    minimal cumulative weight of incoming arcs and removes its
incident edges from the graph until only a single node remains
(cf. computation of total order of vertices when pruning the
causal graph in the Fast Downward JAIR 2006 paper).
"""
def __init__(self, graph, input_order):
self.weighted_graph = graph
# input_order is only used to get the same tie-breaking as
# with the old preprocessor
self.input_order = input_order
def get_result(self):
incoming_weights = defaultdict(int)
for weighted_edges in self.weighted_graph.values():
for target, weight in weighted_edges:
incoming_weights[target] += weight
weight_to_nodes = defaultdict(deque)
for node in self.input_order:
weight = incoming_weights[node]
weight_to_nodes[weight].append(node)
weights = list(weight_to_nodes.keys())
heapq.heapify(weights)
done = set()
result = []
while weights:
min_key = weights[0]
min_elem = None
entries = weight_to_nodes[min_key]
while entries and (min_elem is None or min_elem in done or
min_key > incoming_weights[min_elem]):
min_elem = entries.popleft()
if not entries:
del weight_to_nodes[min_key]
heapq.heappop(weights) # remove min_key from heap
if min_elem is None or min_elem in done:
# since we use lazy deletion from the heap weights,
# there can be weights with a "done" entry in
# weight_to_nodes
continue
done.add(min_elem)
result.append(min_elem)
for target, weight in self.weighted_graph[min_elem]:
if target not in done:
weight = weight % 100000
if weight == 0:
continue
old_in_weight = incoming_weights[target]
new_in_weight = old_in_weight - weight
incoming_weights[target] = new_in_weight
# add new entry to weight_to_nodes
if new_in_weight not in weight_to_nodes:
heapq.heappush(weights, new_in_weight)
weight_to_nodes[new_in_weight].append(target)
return result
class VariableOrder:
"""Apply a given variable order to a SAS task."""
def __init__(self, ordering):
"""Ordering is a list of variable numbers in the desired order.
If a variable does not occur in the ordering, it is removed
from the task.
"""
self.ordering = ordering
self.new_var = {v: i for i, v in enumerate(ordering)}
def apply_to_task(self, sas_task):
self._apply_to_variables(sas_task.variables)
self._apply_to_init(sas_task.init)
self._apply_to_goal(sas_task.goal)
self._apply_to_mutexes(sas_task.mutexes)
self._apply_to_operators(sas_task.operators)
self._apply_to_axioms(sas_task.axioms)
if DEBUG:
sas_task.validate()
def _apply_to_variables(self, variables):
ranges = []
layers = []
names = []
for index, var in enumerate(self.ordering):
ranges.append(variables.ranges[var])
layers.append(variables.axiom_layers[var])
names.append(variables.value_names[var])
variables.ranges = ranges
variables.axiom_layers = layers
variables.value_names = names
def _apply_to_init(self, init):
init.values = [init.values[var] for var in self.ordering]
def _apply_to_goal(self, goal):
goal.pairs = sorted((self.new_var[var], val)
for var, val in goal.pairs
if var in self.new_var)
def _apply_to_mutexes(self, mutexes):
new_mutexes = []
for group in mutexes:
facts = [(self.new_var[var], val) for var, val in group.facts
if var in self.new_var]
if facts and len({var for var, _ in facts}) > 1:
group.facts = facts
new_mutexes.append(group)
print("%s of %s mutex groups necessary." % (len(new_mutexes),
len(mutexes)))
mutexes[:] = new_mutexes
def _apply_to_operators(self, operators):
new_ops = []
for op in operators:
pre_post = []
for eff_var, pre, post, cond in op.pre_post:
if eff_var in self.new_var:
new_cond = list((self.new_var[var], val)
for var, val in cond
if var in self.new_var)
pre_post.append(
(self.new_var[eff_var], pre, post, new_cond))
if pre_post:
op.pre_post = pre_post
op.prevail = [(self.new_var[var], val)
for var, val in op.prevail
if var in self.new_var]
new_ops.append(op)
print("%s of %s operators necessary." % (len(new_ops),
len(operators)))
operators[:] = new_ops
def _apply_to_axioms(self, axioms):
new_axioms = []
for ax in axioms:
eff_var, eff_val = ax.effect
if eff_var in self.new_var:
ax.condition = [(self.new_var[var], val)
for var, val in ax.condition
if var in self.new_var]
ax.effect = (self.new_var[eff_var], eff_val)
new_axioms.append(ax)
print("%s of %s axiom rules necessary." % (len(new_axioms),
len(axioms)))
axioms[:] = new_axioms
def find_and_apply_variable_order(sas_task, reorder_vars=True,
filter_unimportant_vars=True):
if reorder_vars or filter_unimportant_vars:
cg = CausalGraph(sas_task)
if reorder_vars:
order = cg.get_ordering()
else:
order = list(range(len(sas_task.variables.ranges)))
if filter_unimportant_vars:
necessary = cg.calculate_important_vars(sas_task.goal)
print("%s of %s variables necessary." % (len(necessary),
len(order)))
order = [var for var in order if necessary[var]]
VariableOrder(order).apply_to_task(sas_task)
| 11,612 | 39.463415 | 81 | py |
DAAISy | DAAISy-main/src/utils/translate/axiom_rules.py | from collections import defaultdict
from . import pddl_fd as pddl
from . import sccs
from . import timers
DEBUG = False
def handle_axioms(operators, axioms, goals):
axioms_by_atom = get_axioms_by_atom(axioms)
axiom_literals = compute_necessary_axiom_literals(axioms_by_atom, operators, goals)
axiom_init = get_axiom_init(axioms_by_atom, axiom_literals)
with timers.timing("Simplifying axioms"):
axioms = simplify_axioms(axioms_by_atom, axiom_literals)
axioms = compute_negative_axioms(axioms_by_atom, axiom_literals)
# NOTE: compute_negative_axioms more or less invalidates axioms_by_atom.
# Careful with that axe, Eugene!
axiom_layers = compute_axiom_layers(axioms, axiom_init)
if DEBUG:
verify_layering_condition(axioms, axiom_init, axiom_layers)
return axioms, list(axiom_init), axiom_layers
def verify_layering_condition(axioms, axiom_init, axiom_layers):
# This function is only used for debugging.
variables_in_heads = set()
literals_in_heads = set()
variables_with_layers = set()
for axiom in axioms:
head = axiom.effect
variables_in_heads.add(head.positive())
literals_in_heads.add(head)
variables_with_layers = set(axiom_layers.keys())
# 1. Each derived variable only appears in heads with one
# polarity, i.e., never positively *and* negatively.
if False:
print("Verifying 1...")
for literal in literals_in_heads:
assert literal.negate() not in literals_in_heads, literal
else:
print("Verifying 1... [skipped]")
# We currently violate this condition because we introduce
# "negated axioms". See issue454 and issue453.
# 2. A variable has a defined layer iff it appears in a head.
# (This is stricter than it needs to be; we could allow
# derived variables that are never generated by a rule.
# But this test follows the axiom simplification step, and
# after simplification this should not be too strict.)
# All layers are integers and at least 0.
# (Note: the "-1" layer for non-derived variables is
# set elsewhere.)
print("Verifying 2...")
assert variables_in_heads == variables_with_layers
for atom, layer in axiom_layers.items():
assert isinstance(layer, int)
assert layer >= 0
# 3. For every derived variable, it occurs in axiom_init iff
# its negation occurs as the head of an axiom.
if False:
print("Verifying 3...")
for init in list(axiom_init):
assert init.negate() in literals_in_heads
for literal in literals_in_heads:
assert (literal.negated) == (literal.positive() in axiom_init)
else:
print("Verifying 3 [weaker version]...")
# We currently violate this condition because we introduce
# "negated axioms". See issue454 and issue453.
#
# The weaker version we test here is "For every derived variable:
# [it occurs in axiom_init iff its negation occurs as the
# head of an axiom] OR [it occurs with both polarities in
# heads of axioms]."
for init in list(axiom_init):
assert init.negate() in literals_in_heads
for literal in literals_in_heads:
assert ((literal.negated) == (literal.positive() in axiom_init)
or (literal.negate() in literals_in_heads))
# 4. For every rule head <- ... cond ... where cond is a literal
# of a derived variable where the layer of head is equal to
# the layer of cond, cond occurs with the same polarity in heads.
#
# Note regarding issue454 and issue453: Because of the negated axioms
# mentioned in these issues, a derived variable may appear with *both*
# polarities in heads. This makes this test less strong than it would
# be otherwise. When these issues are addressed and axioms only occur
# with one polarity in heads, this test will remain correct in its
# current form, but it will be able to detect more violations of the
# layering property.
print("Verifying 4...")
for axiom in axioms:
head = axiom.effect
head_positive = head.positive()
body = axiom.condition
for cond in body:
cond_positive = cond.positive()
if (cond_positive in variables_in_heads and
axiom_layers[cond_positive] == axiom_layers[head_positive]):
assert cond in literals_in_heads
# 5. For every rule head <- ... cond ... where cond is a literal
# of a derived variable, the layer of head is greater or equal
# to the layer of cond.
print("Verifying 5...")
for axiom in axioms:
head = axiom.effect
head_positive = head.positive()
body = axiom.condition
for cond in body:
cond_positive = cond.positive()
if cond_positive in variables_in_heads:
# We need the assertion to be on a single line for
# our error handler to be able to print the line.
assert (axiom_layers[cond_positive] <= axiom_layers[head_positive]), (
axiom_layers[cond_positive], axiom_layers[head_positive])
def get_axioms_by_atom(axioms):
axioms_by_atom = {}
for axiom in axioms:
axioms_by_atom.setdefault(axiom.effect, []).append(axiom)
return axioms_by_atom
def compute_axiom_layers(axioms, axiom_init):
# We include this assertion to make sure testing membership in
# axiom_init is efficient.
assert isinstance(axiom_init, set)
# Collect all atoms for derived variables.
derived_atoms = set()
for axiom in axioms:
head_atom = axiom.effect.positive()
derived_atoms.add(head_atom)
# Collect dependencies between derived variables:
# 1. "u depends on v" if there is an axiom with variable u
# in the head and variable v in the body.
# 2. "u NBF-depends on v" if additionally the value with which
# v occurs in the body is its NBF (negation-by-failure) value.
#
# We represent depends_on as a dictionary mapping each "u" to
# the list of "v"s such that u depends on v. Note that we do not
# use a defaultdict because the SCC finding algorithm requires
# that all nodes are present as keys in the dict, even if they
# have no successors.
#
    # We do not represent NBF-depends-on separately, but we keep track
    # of a set of triples "weighted_depends_on" which contains all
# triples (u, v, weight) representing dependencies from u to v,
# where weight is 1 for NBF dependencies and 0 for other
# dependencies. Each such triple represents the constraint
# layer(u) >= layer(v) + weight.
depends_on = {u: [] for u in derived_atoms}
weighted_depends_on = set()
for axiom in axioms:
if (axiom.effect in axiom_init or
axiom.effect.negated and axiom.effect.positive() not in axiom_init):
# Skip axioms whose head is the negation-by-failure value.
# These are redundant axioms that should eventually go away
# or at least have some kind of special status that marks
# them as "not the primary axioms".
continue
u = axiom.effect.positive()
for condition in axiom.condition:
v = condition.positive()
if v in derived_atoms:
v_polarity = not condition.negated
v_init_polarity = v in axiom_init
# TODO: Don't include duplicates in depends_on.
depends_on[u].append(v)
if v_polarity == v_init_polarity:
weight = 1
else:
weight = 0
weighted_depends_on.add((u, v, weight))
# Compute the SCCs of dependencies according to depends_on,
# in topological order.
atom_sccs = sccs.get_sccs_adjacency_dict(depends_on)
# Compute an index mapping each atom to the id of its SCC.
atom_to_scc_id = {}
for scc in atom_sccs:
scc_id = id(scc)
for atom in scc:
atom_to_scc_id[atom] = scc_id
# Compute a weighted digraph representing the dependencies
# between SCCs. SCCs U and V are represented by their IDs.
# - We have id(V) in scc_weighted_depends_on[id(U)] iff
# some variable u in U depends on some variable v in V.
# - If there is a dependency, scc_weighted_depends_on[id(U)][id(V)]
# is the weight of the dependency: +1 if an NBF-dependency
# exists, 0 otherwise.
# We want the digraph to be acyclic and hence ignore self-loops.
# A self-loop of weight 1 indicates non-stratifiability.
scc_weighted_depends_on = defaultdict(dict)
for u, v, weight in weighted_depends_on:
scc_u_id = atom_to_scc_id[u]
scc_v_id = atom_to_scc_id[v]
if scc_u_id == scc_v_id:
# Ignore self-loops unless they are self-loops based on
# NBF dependencies, which occur iff the axioms are
# non-stratifiable.
if weight == 1:
raise ValueError(
"Cyclic dependencies in axioms; cannot stratify.")
else:
old_weight = scc_weighted_depends_on[scc_u_id].get(scc_v_id, -1)
if weight > old_weight:
scc_weighted_depends_on[scc_u_id][scc_v_id] = weight
# The layer of variable u is the longest path (taking into account
# the weights) in the weighted digraph defined by
# scc_weighted_depends_on from the SCC of u to any sink.
# We first compute the longest paths in the SCC digraph. This
# computation exploits that atom_sccs is given in
# topological sort order.
scc_id_to_layer = {}
for scc in reversed(atom_sccs):
scc_id = id(scc)
layer = 0
for succ_scc_id, weight in scc_weighted_depends_on[scc_id].items():
layer = max(layer, scc_id_to_layer[succ_scc_id] + weight)
scc_id_to_layer[scc_id] = layer
# Finally, we set the layers for all nodes based on the layers of
# their SCCs.
layers = {}
for scc in atom_sccs:
scc_layer = scc_id_to_layer[id(scc)]
for atom in scc:
layers[atom] = scc_layer
return layers
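# Worked example (illustrative; the atoms and weights are made up): suppose
# derived atom a depends on b with weight 0 and b depends on c with weight 1
# (an NBF dependency). The constraints are layer(a) >= layer(b) + 0 and
# layer(b) >= layer(c) + 1, so the longest-path computation above yields
# layer(c) = 0, layer(b) = 1 and layer(a) = 1.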
def compute_necessary_axiom_literals(axioms_by_atom, operators, goal):
necessary_literals = set()
queue = []
def register_literals(literals, negated):
for literal in literals:
if literal.positive() in axioms_by_atom: # This is an axiom literal
if negated:
literal = literal.negate()
if literal not in necessary_literals:
necessary_literals.add(literal)
queue.append(literal)
# Initialize queue with axioms required for goal and operators.
register_literals(goal, False)
for op in operators:
register_literals(op.precondition, False)
for (cond, _) in op.add_effects:
register_literals(cond, False)
for (cond, _) in op.del_effects:
register_literals(cond, True)
while queue:
literal = queue.pop()
axioms = axioms_by_atom[literal.positive()]
for axiom in axioms:
register_literals(axiom.condition, literal.negated)
return necessary_literals
def get_axiom_init(axioms_by_atom, necessary_literals):
result = set()
for atom in axioms_by_atom:
if atom not in necessary_literals and atom.negate() in necessary_literals:
# Initial value for axiom: False (which is omitted due to closed world
# assumption) unless it is only needed negatively.
result.add(atom)
return result
def simplify_axioms(axioms_by_atom, necessary_literals):
necessary_atoms = {literal.positive() for literal in necessary_literals}
new_axioms = []
for atom in necessary_atoms:
axioms = simplify(axioms_by_atom[atom])
axioms_by_atom[atom] = axioms
new_axioms += axioms
return new_axioms
def remove_duplicates(alist):
next_elem = 1
for i in range(1, len(alist)):
if alist[i] != alist[i - 1]:
alist[next_elem] = alist[i]
next_elem += 1
alist[next_elem:] = []
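# Example (illustrative): remove_duplicates assumes a sorted list and
# removes adjacent duplicates in place:
#     lst = [1, 1, 2, 3, 3]
#     remove_duplicates(lst)    # lst is now [1, 2, 3]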
def simplify(axioms):
"""Remove duplicate axioms, duplicates within axioms, and dominated axioms."""
# Remove duplicates from axiom conditions.
for axiom in axioms:
axiom.condition.sort()
remove_duplicates(axiom.condition)
# Remove dominated axioms.
axioms_to_skip = set()
axioms_by_literal = {}
for axiom in axioms:
if axiom.effect in axiom.condition:
axioms_to_skip.add(id(axiom))
else:
for literal in axiom.condition:
axioms_by_literal.setdefault(literal, set()).add(id(axiom))
for axiom in axioms:
if id(axiom) in axioms_to_skip:
continue # Required to keep one of multiple identical axioms.
if not axiom.condition: # empty condition: dominates everything
return [axiom]
literals = iter(axiom.condition)
dominated_axioms = axioms_by_literal[next(literals)]
for literal in literals:
dominated_axioms &= axioms_by_literal[literal]
for dominated_axiom in dominated_axioms:
if dominated_axiom != id(axiom):
axioms_to_skip.add(dominated_axiom)
return [axiom for axiom in axioms if id(axiom) not in axioms_to_skip]
def compute_negative_axioms(axioms_by_atom, necessary_literals):
new_axioms = []
for literal in necessary_literals:
if literal.negated:
new_axioms += negate(axioms_by_atom[literal.positive()])
else:
new_axioms += axioms_by_atom[literal]
return new_axioms
def negate(axioms):
assert axioms
result = [pddl.PropositionalAxiom(axioms[0].name, [], axioms[0].effect.negate())]
for axiom in axioms:
condition = axiom.condition
if len(condition) == 0:
# The derived fact we want to negate is triggered with an
# empty condition, so it is always true and its negation
# is always false.
return []
elif len(condition) == 1: # Handle easy special case quickly.
new_literal = condition[0].negate()
for result_axiom in result:
result_axiom.condition.append(new_literal)
else:
new_result = []
for literal in condition:
literal = literal.negate()
for result_axiom in result:
new_axiom = result_axiom.clone()
new_axiom.condition.append(literal)
new_result.append(new_axiom)
result = new_result
result = simplify(result)
return result
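# Worked example (illustrative; the rules are made up): if the derived atom d
# has the two axioms "d <- p, q" and "d <- r", then "not d" holds iff
# (not p or not q) and not r. Multiplying out as above yields the two
# negated axioms "not-d <- not p, not r" and "not-d <- not q, not r",
# which simplify() then cleans up.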
| 14,906 | 38.858289 | 87 | py |
DAAISy | DAAISy-main/src/utils/translate/translate.py | #! /usr/bin/env python3
import os
import sys
import traceback
def python_version_supported():
return sys.version_info >= (3, 6)
if not python_version_supported():
sys.exit("Error: Translator only supports Python >= 3.6.")
from collections import defaultdict
from copy import deepcopy
from itertools import product
from . import axiom_rules
from . import fact_groups
from . import instantiate
from . import normalize
from . import options
from . import pddl_fd as pddl
from . import pddl_parser
from . import sas_tasks
import signal
from . import simplify
from . import timers
from . import tools
from . import variable_order
# TODO: The translator may generate trivial derived variables which are always
# true, for example if there is a derived predicate in the input that only
# depends on (non-derived) variables which are detected as always true.
# Such a situation was encountered in the PSR-STRIPS-DerivedPredicates domain.
# Such "always-true" variables should best be compiled away, but it is
# not clear what the best place to do this should be. Similar
# simplifications might be possible elsewhere, for example if a
# derived variable is synonymous with another variable (derived or
# non-derived).
DEBUG = False
## For a full list of exit codes, please see driver/returncodes.py. Here,
## we only list codes that are used by the translator component of the planner.
TRANSLATE_OUT_OF_MEMORY = 20
TRANSLATE_OUT_OF_TIME = 21
simplified_effect_condition_counter = 0
added_implied_precondition_counter = 0
def strips_to_sas_dictionary(groups, assert_partial):
dictionary = {}
for var_no, group in enumerate(groups):
for val_no, atom in enumerate(group):
dictionary.setdefault(atom, []).append((var_no, val_no))
if assert_partial:
assert all(len(sas_pairs) == 1
for sas_pairs in dictionary.values())
return [len(group) + 1 for group in groups], dictionary
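# Worked example (illustrative; the facts are made up): for
# groups = [[Atom at(a), Atom at(b)]], the dictionary maps at(a) to [(0, 0)]
# and at(b) to [(0, 1)], and the returned ranges are [3] because each
# variable gets an additional "none of those" value.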
def translate_strips_conditions_aux(conditions, dictionary, ranges):
condition = {}
for fact in conditions:
if fact.negated:
# we handle negative conditions later, because then we
# can recognize when the negative condition is already
# ensured by a positive condition
continue
for var, val in dictionary.get(fact, ()):
# The default () here is a bit of a hack. For goals (but
# only for goals!), we can get static facts here. They
# cannot be statically false (that would have been
# detected earlier), and hence they are statically true
# and don't need to be translated.
# TODO: This would not be necessary if we dealt with goals
# in the same way we deal with operator preconditions etc.,
# where static facts disappear during grounding. So change
# this when the goal code is refactored (also below). (**)
if (condition.get(var) is not None and
val not in condition.get(var)):
# Conflicting conditions on this variable: Operator invalid.
return None
condition[var] = {val}
def number_of_values(var_vals_pair):
var, vals = var_vals_pair
return len(vals)
for fact in conditions:
if fact.negated:
## Note: here we use a different solution than in Sec. 10.6.4
## of the thesis. Compare the last sentences of the third
## paragraph of the section.
## We could do what is written there. As a test case,
## consider Airport ADL tasks with only one airport, where
## (occupied ?x) variables are encoded in a single variable,
## and conditions like (not (occupied ?x)) do occur in
## preconditions.
## However, here we avoid introducing new derived predicates
            ## by treating the negative precondition as a disjunctive
## precondition and expanding it by "multiplying out" the
## possibilities. This can lead to an exponential blow-up so
## it would be nice to choose the behaviour as an option.
done = False
new_condition = {}
atom = pddl.Atom(fact.predicate, fact.args) # force positive
for var, val in dictionary.get(atom, ()):
# see comment (**) above
poss_vals = set(range(ranges[var]))
poss_vals.remove(val)
if condition.get(var) is None:
assert new_condition.get(var) is None
new_condition[var] = poss_vals
else:
# constrain existing condition on var
prev_possible_vals = condition.get(var)
done = True
prev_possible_vals.intersection_update(poss_vals)
if len(prev_possible_vals) == 0:
# Conflicting conditions on this variable:
# Operator invalid.
return None
if not done and len(new_condition) != 0:
# we did not enforce the negative condition by constraining
# an existing condition on one of the variables representing
# this atom. So we need to introduce a new condition:
                # We can select any entry from new_condition and currently
                # prefer the smallest one.
candidates = sorted(new_condition.items(), key=number_of_values)
var, vals = candidates[0]
condition[var] = vals
def multiply_out(condition): # destroys the input
sorted_conds = sorted(condition.items(), key=number_of_values)
flat_conds = [{}]
for var, vals in sorted_conds:
if len(vals) == 1:
for cond in flat_conds:
cond[var] = vals.pop() # destroys the input here
else:
new_conds = []
for cond in flat_conds:
for val in vals:
new_cond = deepcopy(cond)
new_cond[var] = val
new_conds.append(new_cond)
flat_conds = new_conds
return flat_conds
return multiply_out(condition)
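# Worked example (illustrative; variables and values are made up): if the
# intermediate mapping built above is {v0: {0, 2}, v1: {0}} (e.g. because a
# negative condition ruled out v0 = 1), multiply_out produces the two flat
# conditions {v0: 0, v1: 0} and {v0: 2, v1: 0}.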
def translate_strips_conditions(conditions, dictionary, ranges,
mutex_dict, mutex_ranges):
if not conditions:
return [{}] # Quick exit for common case.
# Check if the condition violates any mutexes.
if translate_strips_conditions_aux(conditions, mutex_dict,
mutex_ranges) is None:
return None
return translate_strips_conditions_aux(conditions, dictionary, ranges)
def translate_strips_operator(operator, dictionary, ranges, mutex_dict,
mutex_ranges, implied_facts):
conditions = translate_strips_conditions(operator.precondition, dictionary,
ranges, mutex_dict, mutex_ranges)
if conditions is None:
return []
sas_operators = []
for condition in conditions:
op = translate_strips_operator_aux(operator, dictionary, ranges,
mutex_dict, mutex_ranges,
implied_facts, condition)
if op is not None:
sas_operators.append(op)
return sas_operators
def negate_and_translate_condition(condition, dictionary, ranges, mutex_dict,
mutex_ranges):
# condition is a list of lists of literals (DNF)
# the result is the negation of the condition in DNF in
# finite-domain representation (a list of dictionaries that map
# variables to values)
negation = []
if [] in condition: # condition always satisfied
return None # negation unsatisfiable
for combination in product(*condition):
cond = [l.negate() for l in combination]
cond = translate_strips_conditions(cond, dictionary, ranges,
mutex_dict, mutex_ranges)
if cond is not None:
negation.extend(cond)
return negation if negation else None
def translate_strips_operator_aux(operator, dictionary, ranges, mutex_dict,
mutex_ranges, implied_facts, condition):
# collect all add effects
effects_by_variable = defaultdict(lambda: defaultdict(list))
# effects_by_variables: var -> val -> list(FDR conditions)
add_conds_by_variable = defaultdict(list)
for conditions, fact in operator.add_effects:
eff_condition_list = translate_strips_conditions(conditions, dictionary,
ranges, mutex_dict,
mutex_ranges)
if eff_condition_list is None: # Impossible condition for this effect.
continue
for var, val in dictionary[fact]:
effects_by_variable[var][val].extend(eff_condition_list)
add_conds_by_variable[var].append(conditions)
# collect all del effects
del_effects_by_variable = defaultdict(lambda: defaultdict(list))
for conditions, fact in operator.del_effects:
eff_condition_list = translate_strips_conditions(conditions, dictionary,
ranges, mutex_dict,
mutex_ranges)
if eff_condition_list is None: # Impossible condition for this effect.
continue
for var, val in dictionary[fact]:
del_effects_by_variable[var][val].extend(eff_condition_list)
# add effect var=none_of_those for all del effects with the additional
# condition that the deleted value has been true and no add effect triggers
for var in del_effects_by_variable:
no_add_effect_condition = negate_and_translate_condition(
add_conds_by_variable[var], dictionary, ranges, mutex_dict,
mutex_ranges)
if no_add_effect_condition is None: # there is always an add effect
continue
none_of_those = ranges[var] - 1
for val, conds in del_effects_by_variable[var].items():
for cond in conds:
# add guard
if var in cond and cond[var] != val:
continue # condition inconsistent with deleted atom
cond[var] = val
# add condition that no add effect triggers
for no_add_cond in no_add_effect_condition:
new_cond = dict(cond)
# This is a rather expensive step. We try every no_add_cond
# with every condition of the delete effect and discard the
                    # overall combination if it is unsatisfiable. Since
# no_add_effect_condition is precomputed it can contain many
# no_add_conds in which a certain literal occurs. So if cond
# plus the literal is already unsatisfiable, we still try
# all these combinations. A possible optimization would be
# to re-compute no_add_effect_condition for every delete
# effect and to unfold the product(*condition) in
# negate_and_translate_condition to allow an early break.
for cvar, cval in no_add_cond.items():
if cvar in new_cond and new_cond[cvar] != cval:
# the del effect condition plus the deleted atom
# imply that some add effect on the variable
# triggers
break
new_cond[cvar] = cval
else:
effects_by_variable[var][none_of_those].append(new_cond)
return build_sas_operator(operator.name, condition, effects_by_variable,
operator.cost, ranges, implied_facts)
def build_sas_operator(name, condition, effects_by_variable, cost, ranges,
implied_facts):
if options.add_implied_preconditions:
implied_precondition = set()
for fact in condition.items():
implied_precondition.update(implied_facts[fact])
prevail_and_pre = dict(condition)
pre_post = []
for var, effects_on_var in effects_by_variable.items():
orig_pre = condition.get(var, -1)
added_effect = False
for post, eff_conditions in effects_on_var.items():
pre = orig_pre
# if the effect does not change the variable value, we ignore it
if pre == post:
continue
eff_condition_lists = [sorted(eff_cond.items())
for eff_cond in eff_conditions]
if ranges[var] == 2:
# Apply simplifications for binary variables.
if prune_stupid_effect_conditions(var, post,
eff_condition_lists,
effects_on_var):
global simplified_effect_condition_counter
simplified_effect_condition_counter += 1
if (options.add_implied_preconditions and pre == -1 and
(var, 1 - post) in implied_precondition):
global added_implied_precondition_counter
added_implied_precondition_counter += 1
pre = 1 - post
for eff_condition in eff_condition_lists:
# we do not need to represent a precondition as effect condition
# and we do not want to keep an effect whose condition contradicts
# a pre- or prevail condition
filtered_eff_condition = []
eff_condition_contradicts_precondition = False
for variable, value in eff_condition:
if variable in prevail_and_pre:
if prevail_and_pre[variable] != value:
eff_condition_contradicts_precondition = True
break
else:
filtered_eff_condition.append((variable, value))
if eff_condition_contradicts_precondition:
continue
pre_post.append((var, pre, post, filtered_eff_condition))
added_effect = True
if added_effect:
# the condition on var is not a prevail condition but a
# precondition, so we remove it from the prevail condition
condition.pop(var, -1)
if not pre_post: # operator is noop
return None
prevail = list(condition.items())
return sas_tasks.SASOperator(name, prevail, pre_post, cost)
def prune_stupid_effect_conditions(var, val, conditions, effects_on_var):
## (IF <conditions> THEN <var> := <val>) is a conditional effect.
## <var> is guaranteed to be a binary variable.
## <conditions> is in DNF representation (list of lists).
##
## We simplify <conditions> by applying two rules:
## 1. Conditions of the form "var = dualval" where var is the
## effect variable and dualval != val can be omitted.
## (If var != dualval, then var == val because it is binary,
## which means that in such situations the effect is a no-op.)
## The condition can only be omitted if there is no effect
## producing dualval (see issue736).
## 2. If conditions contains any empty list, it is equivalent
## to True and we can remove all other disjuncts.
##
## returns True when anything was changed
if conditions == [[]]:
return False # Quick exit for common case.
assert val in [0, 1]
dual_val = 1 - val
dual_fact = (var, dual_val)
if dual_val in effects_on_var:
return False
simplified = False
for condition in conditions:
# Apply rule 1.
while dual_fact in condition:
# print "*** Removing dual condition"
simplified = True
condition.remove(dual_fact)
# Apply rule 2.
if not condition:
conditions[:] = [[]]
simplified = True
break
return simplified
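# Worked example (illustrative; the facts are made up): for a binary variable
# var with the conditional effect "IF [[(var, 0)], [(other, 1)]] THEN
# var := 1" and no other effect setting var := 0, rule 1 removes the
# (var, 0) conjunct, which leaves an empty disjunct, and rule 2 then
# collapses the whole condition to [[]] (i.e. the effect fires
# unconditionally).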
def translate_strips_axiom(axiom, dictionary, ranges, mutex_dict, mutex_ranges):
conditions = translate_strips_conditions(axiom.condition, dictionary,
ranges, mutex_dict, mutex_ranges)
if conditions is None:
return []
if axiom.effect.negated:
[(var, _)] = dictionary[axiom.effect.positive()]
effect = (var, ranges[var] - 1)
else:
[effect] = dictionary[axiom.effect]
axioms = []
for condition in conditions:
axioms.append(sas_tasks.SASAxiom(condition.items(), effect))
return axioms
def translate_strips_operators(actions, strips_to_sas, ranges, mutex_dict,
mutex_ranges, implied_facts):
result = []
for action in actions:
sas_ops = translate_strips_operator(action, strips_to_sas, ranges,
mutex_dict, mutex_ranges,
implied_facts)
result.extend(sas_ops)
return result
def translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
mutex_ranges):
result = []
for axiom in axioms:
sas_axioms = translate_strips_axiom(axiom, strips_to_sas, ranges,
mutex_dict, mutex_ranges)
result.extend(sas_axioms)
return result
def dump_task(init, goals, actions, axioms, axiom_layer_dict):
old_stdout = sys.stdout
with open("output.dump", "w") as dump_file:
sys.stdout = dump_file
print("Initial state")
for atom in init:
print(atom)
print()
print("Goals")
for goal in goals:
print(goal)
for action in actions:
print()
print("Action")
action.dump()
for axiom in axioms:
print()
print("Axiom")
axiom.dump()
print()
print("Axiom layers")
for atom, layer in axiom_layer_dict.items():
print("%s: layer %d" % (atom, layer))
sys.stdout = old_stdout
def translate_task(strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
init, goals,
actions, axioms, metric, implied_facts):
with timers.timing("Processing axioms", block=True):
axioms, axiom_init, axiom_layer_dict = axiom_rules.handle_axioms(
actions, axioms, goals)
init = init + axiom_init
if options.dump_task:
# Remove init facts that don't occur in strips_to_sas: they're constant.
nonconstant_init = filter(strips_to_sas.get, init)
dump_task(nonconstant_init, goals, actions, axioms, axiom_layer_dict)
init_values = [rang - 1 for rang in ranges]
# Closed World Assumption: Initialize to "range - 1" == Nothing.
for fact in init:
pairs = strips_to_sas.get(fact, []) # empty for static init facts
for var, val in pairs:
curr_val = init_values[var]
if curr_val != ranges[var] - 1 and curr_val != val:
assert False, "Inconsistent init facts! [fact = %s]" % fact
init_values[var] = val
init = sas_tasks.SASInit(init_values)
goal_dict_list = translate_strips_conditions(goals, strips_to_sas, ranges,
mutex_dict, mutex_ranges)
if goal_dict_list is None:
# "None" is a signal that the goal is unreachable because it
# violates a mutex.
return unsolvable_sas_task("Goal violates a mutex")
assert len(goal_dict_list) == 1, "Negative goal not supported"
    ## We could substitute the negative goal literal in
    ## normalize.substitute_complicated_goal, using an axiom. We currently
    ## don't do this because we don't run into this assertion if the
    ## negative goal is part of a finite-domain variable with only two
    ## values, which is usually the case. We therefore refrain from
    ## introducing axioms (which are not supported by all heuristics).
goal_pairs = list(goal_dict_list[0].items())
if not goal_pairs:
return solvable_sas_task("Empty goal")
goal = sas_tasks.SASGoal(goal_pairs)
operators = translate_strips_operators(actions, strips_to_sas, ranges,
mutex_dict, mutex_ranges,
implied_facts)
axioms = translate_strips_axioms(axioms, strips_to_sas, ranges, mutex_dict,
mutex_ranges)
axiom_layers = [-1] * len(ranges)
for atom, layer in axiom_layer_dict.items():
assert layer >= 0
[(var, val)] = strips_to_sas[atom]
axiom_layers[var] = layer
variables = sas_tasks.SASVariables(ranges, axiom_layers, translation_key)
mutexes = [sas_tasks.SASMutexGroup(group) for group in mutex_key]
return sas_tasks.SASTask(variables, mutexes, init, goal,
operators, axioms, metric)
def trivial_task(solvable):
variables = sas_tasks.SASVariables(
[2], [-1], [["Atom dummy(val1)", "Atom dummy(val2)"]])
# We create no mutexes: the only possible mutex is between
# dummy(val1) and dummy(val2), but the preprocessor would filter
# it out anyway since it is trivial (only involves one
# finite-domain variable).
mutexes = []
init = sas_tasks.SASInit([0])
if solvable:
goal_fact = (0, 0)
else:
goal_fact = (0, 1)
goal = sas_tasks.SASGoal([goal_fact])
operators = []
axioms = []
metric = True
return sas_tasks.SASTask(variables, mutexes, init, goal,
operators, axioms, metric)
def solvable_sas_task(msg):
print("%s! Generating solvable task..." % msg)
return trivial_task(solvable=True)
def unsolvable_sas_task(msg):
print("%s! Generating unsolvable task..." % msg)
return trivial_task(solvable=False)
def pddl_to_sas(task):
with timers.timing("Instantiating", block=True):
(relaxed_reachable, atoms, actions, axioms,
reachable_action_params) = instantiate.explore(task)
if not relaxed_reachable:
return unsolvable_sas_task("No relaxed solution")
# HACK! Goals should be treated differently.
if isinstance(task.goal, pddl.Conjunction):
goal_list = task.goal.parts
else:
goal_list = [task.goal]
for item in goal_list:
assert isinstance(item, pddl.Literal)
with timers.timing("Computing fact groups", block=True):
groups, mutex_groups, translation_key = fact_groups.compute_groups(
task, atoms, reachable_action_params)
with timers.timing("Building STRIPS to SAS dictionary"):
ranges, strips_to_sas = strips_to_sas_dictionary(
groups, assert_partial=options.use_partial_encoding)
with timers.timing("Building dictionary for full mutex groups"):
mutex_ranges, mutex_dict = strips_to_sas_dictionary(
mutex_groups, assert_partial=False)
if options.add_implied_preconditions:
with timers.timing("Building implied facts dictionary..."):
implied_facts = build_implied_facts(strips_to_sas, groups,
mutex_groups)
else:
implied_facts = {}
with timers.timing("Building mutex information", block=True):
if options.use_partial_encoding:
mutex_key = build_mutex_key(strips_to_sas, mutex_groups)
else:
# With our current representation, emitting complete mutex
# information for the full encoding can incur an
# unacceptable (quadratic) blowup in the task representation
# size. See issue771 for details.
print("using full encoding: between-variable mutex information skipped.")
mutex_key = []
with timers.timing("Translating task", block=True):
sas_task = translate_task(
strips_to_sas, ranges, translation_key,
mutex_dict, mutex_ranges, mutex_key,
task.init, goal_list, actions, axioms, task.use_min_cost_metric,
implied_facts)
print("%d effect conditions simplified" %
simplified_effect_condition_counter)
print("%d implied preconditions added" %
added_implied_precondition_counter)
if options.filter_unreachable_facts:
with timers.timing("Detecting unreachable propositions", block=True):
try:
simplify.filter_unreachable_propositions(sas_task)
except simplify.Impossible:
return unsolvable_sas_task("Simplified to trivially false goal")
except simplify.TriviallySolvable:
return solvable_sas_task("Simplified to empty goal")
if options.reorder_variables or options.filter_unimportant_vars:
with timers.timing("Reordering and filtering variables", block=True):
variable_order.find_and_apply_variable_order(
sas_task, options.reorder_variables,
options.filter_unimportant_vars)
return sas_task
def build_mutex_key(strips_to_sas, groups):
assert options.use_partial_encoding
group_keys = []
for group in groups:
group_key = []
for fact in group:
represented_by = strips_to_sas.get(fact)
if represented_by:
assert len(represented_by) == 1
group_key.append(represented_by[0])
else:
print("not in strips_to_sas, left out:", fact)
group_keys.append(group_key)
return group_keys
def build_implied_facts(strips_to_sas, groups, mutex_groups):
## Compute a dictionary mapping facts (FDR pairs) to lists of FDR
## pairs implied by that fact. In other words, in all states
## containing p, all pairs in implied_facts[p] must also be true.
##
## There are two simple cases where a pair p implies a pair q != p
## in our FDR encodings:
## 1. p and q encode the same fact
## 2. p encodes a STRIPS proposition X, q encodes a STRIPS literal
## "not Y", and X and Y are mutex.
##
## The first case cannot arise when we use partial encodings, and
## when we use full encodings, I don't think it would give us any
## additional information to exploit in the operator translation,
## so we only use the second case.
##
## Note that for a pair q to encode a fact "not Y", Y must form a
## fact group of size 1. We call such propositions Y "lonely".
## In the first step, we compute a dictionary mapping each lonely
## proposition to its variable number.
lonely_propositions = {}
for var_no, group in enumerate(groups):
if len(group) == 1:
lonely_prop = group[0]
assert strips_to_sas[lonely_prop] == [(var_no, 0)]
lonely_propositions[lonely_prop] = var_no
## Then we compute implied facts as follows: for each mutex group,
## check if prop is lonely (then and only then "not prop" has a
## representation as an FDR pair). In that case, all other facts
## in this mutex group imply "not prop".
implied_facts = defaultdict(list)
for mutex_group in mutex_groups:
for prop in mutex_group:
prop_var = lonely_propositions.get(prop)
if prop_var is not None:
prop_is_false = (prop_var, 1)
for other_prop in mutex_group:
if other_prop is not prop:
for other_fact in strips_to_sas[other_prop]:
implied_facts[other_fact].append(prop_is_false)
return implied_facts
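# Worked example (illustrative; the facts are made up): if on(a, b) forms a
# lonely group encoded by variable v (value 0 = on(a, b), value 1 =
# "not on(a, b)") and on(a, b) is mutex with on(a, c), then every FDR pair
# encoding on(a, c) gets (v, 1) added to its implied facts.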
def dump_statistics(sas_task):
print("Translator variables: %d" % len(sas_task.variables.ranges))
print("Translator derived variables: %d" %
len([layer for layer in sas_task.variables.axiom_layers
if layer >= 0]))
print("Translator facts: %d" % sum(sas_task.variables.ranges))
print("Translator goal facts: %d" % len(sas_task.goal.pairs))
print("Translator mutex groups: %d" % len(sas_task.mutexes))
print("Translator total mutex groups size: %d" %
sum(mutex.get_encoding_size() for mutex in sas_task.mutexes))
print("Translator operators: %d" % len(sas_task.operators))
print("Translator axioms: %d" % len(sas_task.axioms))
print("Translator task size: %d" % sas_task.get_encoding_size())
try:
peak_memory = tools.get_peak_memory_in_kb()
except Warning as warning:
print(warning)
else:
print("Translator peak memory: %d KB" % peak_memory)
def main():
timer = timers.Timer()
with timers.timing("Parsing", True):
task = pddl_parser.open(
domain_filename=options.domain, task_filename=options.task)
with timers.timing("Normalizing task"):
normalize.normalize(task)
if options.generate_relaxed_task:
# Remove delete effects.
for action in task.actions:
for index, effect in reversed(list(enumerate(action.effects))):
if effect.literal.negated:
del action.effects[index]
sas_task = pddl_to_sas(task)
dump_statistics(sas_task)
with timers.timing("Writing output"):
with open(options.sas_file, "w") as output_file:
sas_task.output(output_file)
print("Done! %s" % timer)
def handle_sigxcpu(signum, stackframe):
print()
print("Translator hit the time limit")
# sys.exit() is not safe to be called from within signal handlers, but
# os._exit() is.
os._exit(TRANSLATE_OUT_OF_TIME)
if __name__ == "__main__":
try:
signal.signal(signal.SIGXCPU, handle_sigxcpu)
except AttributeError:
print("Warning! SIGXCPU is not available on your platform. "
"This means that the planner cannot be gracefully terminated "
"when using a time limit, which, however, is probably "
"supported on your platform anyway.")
try:
# Reserve about 10 MB of emergency memory.
# https://stackoverflow.com/questions/19469608/
emergency_memory = b"x" * 10 ** 7
main()
except MemoryError:
del emergency_memory
print()
print("Translator ran out of memory, traceback:")
print("=" * 79)
traceback.print_exc(file=sys.stdout)
print("=" * 79)
sys.exit(TRANSLATE_OUT_OF_MEMORY)
| 31,059 | 41.02977 | 85 | py |
DAAISy | DAAISy-main/src/utils/translate/graph.py | #! /usr/bin/env python3
class Graph:
def __init__(self, nodes):
self.nodes = nodes
self.neighbours = {u: set() for u in nodes}
def connect(self, u, v):
self.neighbours[u].add(v)
self.neighbours[v].add(u)
def connected_components(self):
remaining_nodes = set(self.nodes)
result = []
def dfs(node):
result[-1].append(node)
remaining_nodes.remove(node)
for neighbour in self.neighbours[node]:
if neighbour in remaining_nodes:
dfs(neighbour)
while remaining_nodes:
node = next(iter(remaining_nodes))
result.append([])
dfs(node)
result[-1].sort()
return sorted(result)
def transitive_closure(pairs):
# Warshall's algorithm.
result = set(pairs)
nodes = {u for (u, v) in pairs} | {v for (u, v) in pairs}
for k in nodes:
for i in nodes:
for j in nodes:
if (i, j) not in result and (i, k) in result and (k, j) in result:
result.add((i, j))
return sorted(result)
if __name__ == "__main__":
g = Graph([1, 2, 3, 4, 5, 6])
g.connect(1, 2)
g.connect(1, 3)
g.connect(4, 5)
print(g.connected_components())
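    # Illustrative addition (not in the original file): a tiny demo of the
    # Warshall-style transitive closure defined above.
    print(transitive_closure([(1, 2), (2, 3)]))
    # expected output: [(1, 2), (1, 3), (2, 3)]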
| 1,300 | 25.02 | 82 | py |
DAAISy | DAAISy-main/src/utils/translate/fact_groups.py | from . import invariant_finder
from . import options
from . import pddl_fd as pddl
from . import timers
DEBUG = False
def expand_group(group, task, reachable_facts):
result = []
for fact in group:
try:
pos = list(fact.args).index("?X")
except ValueError:
result.append(fact)
else:
# NOTE: This could be optimized by only trying objects of the correct
# type, or by using a unifier which directly generates the
# applicable objects. It is not worth optimizing this at this stage,
# though.
for obj in task.objects:
newargs = list(fact.args)
newargs[pos] = obj.name
atom = pddl.Atom(fact.predicate, newargs)
if atom in reachable_facts:
result.append(atom)
return result
def instantiate_groups(groups, task, reachable_facts):
return [expand_group(group, task, reachable_facts) for group in groups]
class GroupCoverQueue:
def __init__(self, groups):
if groups:
self.max_size = max([len(group) for group in groups])
self.groups_by_size = [[] for i in range(self.max_size + 1)]
self.groups_by_fact = {}
for group in groups:
group = set(group) # Copy group, as it will be modified.
self.groups_by_size[len(group)].append(group)
for fact in group:
self.groups_by_fact.setdefault(fact, []).append(group)
self._update_top()
else:
self.max_size = 0
def __bool__(self):
return self.max_size > 1
__nonzero__ = __bool__
def pop(self):
result = list(self.top) # Copy; this group will shrink further.
if options.use_partial_encoding:
for fact in result:
for group in self.groups_by_fact[fact]:
group.remove(fact)
self._update_top()
return result
def _update_top(self):
while self.max_size > 1:
max_list = self.groups_by_size[self.max_size]
while max_list:
candidate = max_list.pop()
if len(candidate) == self.max_size:
self.top = candidate
return
self.groups_by_size[len(candidate)].append(candidate)
self.max_size -= 1
def choose_groups(groups, reachable_facts):
queue = GroupCoverQueue(groups)
uncovered_facts = reachable_facts.copy()
result = []
while queue:
group = queue.pop()
uncovered_facts.difference_update(group)
result.append(group)
print(len(uncovered_facts), "uncovered facts")
result += [[fact] for fact in uncovered_facts]
return result
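# Worked example (illustrative; the facts are made up): with reachable facts
# {p, q, r, s} and candidate groups {p, q, r} and {r, s}, the queue first
# yields {p, q, r}; under partial encoding r is then removed from {r, s},
# the remaining singleton is too small to be popped, and s becomes its own
# variable [s].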
def build_translation_key(groups):
group_keys = []
for group in groups:
group_key = [str(fact) for fact in group]
if len(group) == 1:
group_key.append(str(group[0].negate()))
else:
group_key.append("<none of those>")
group_keys.append(group_key)
return group_keys
def collect_all_mutex_groups(groups, atoms):
# NOTE: This should be functionally identical to choose_groups
# when partial_encoding is set to False. Maybe a future
# refactoring could take that into account.
all_groups = []
uncovered_facts = atoms.copy()
for group in groups:
uncovered_facts.difference_update(group)
all_groups.append(group)
all_groups += [[fact] for fact in uncovered_facts]
return all_groups
def sort_groups(groups):
return sorted(sorted(group) for group in groups)
def compute_groups(task, atoms, reachable_action_params):
groups = invariant_finder.get_groups(task, reachable_action_params)
with timers.timing("Instantiating groups"):
groups = instantiate_groups(groups, task, atoms)
# Sort here already to get deterministic mutex groups.
groups = sort_groups(groups)
# TODO: I think that collect_all_mutex_groups should do the same thing
# as choose_groups with partial_encoding=False, so these two should
# be unified.
with timers.timing("Collecting mutex groups"):
mutex_groups = collect_all_mutex_groups(groups, atoms)
with timers.timing("Choosing groups", block=True):
groups = choose_groups(groups, atoms)
groups = sort_groups(groups)
with timers.timing("Building translation key"):
translation_key = build_translation_key(groups)
if DEBUG:
for group in groups:
if len(group) >= 2:
print("{%s}" % ", ".join(map(str, group)))
return groups, mutex_groups, translation_key
| 4,749 | 32.450704 | 86 | py |
DAAISy | DAAISy-main/src/utils/translate/timers.py | import contextlib
import os
import sys
import time
class Timer:
def __init__(self):
self.start_time = time.time()
self.start_clock = self._clock()
def _clock(self):
times = os.times()
return times[0] + times[1]
def __str__(self):
return "[%.3fs CPU, %.3fs wall-clock]" % (
self._clock() - self.start_clock,
time.time() - self.start_time)
@contextlib.contextmanager
def timing(text, block=False):
timer = Timer()
if block:
print("%s..." % text)
else:
print("%s..." % text, end=' ')
sys.stdout.flush()
yield
if block:
print("%s: %s" % (text, timer))
else:
print(timer)
sys.stdout.flush()
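if __name__ == "__main__":
    # Illustrative self-test (not part of the original module): demonstrates
    # the timing() context manager on a trivial workload.
    with timing("Summing a range", block=True):
        total = sum(range(10**6))
    print("sum:", total)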
| 732 | 19.361111 | 50 | py |
DAAISy | DAAISy-main/src/utils/translate/tools.py | def cartesian_product(sequences):
# TODO: Rename this. It's not good that we have two functions
# called "product" and "cartesian_product", of which "product"
# computes cartesian products, while "cartesian_product" does not.
# This isn't actually a proper cartesian product because we
# concatenate lists, rather than forming sequences of atomic elements.
# We could probably also use something like
# map(itertools.chain, product(*sequences))
# but that does not produce the same results
if not sequences:
yield []
else:
temp = list(cartesian_product(sequences[1:]))
for item in sequences[0]:
for sequence in temp:
yield item + sequence
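# Example (illustrative): unlike itertools.product, the input sequences are
# expected to contain lists, and one list per sequence is concatenated:
#     list(cartesian_product([[[1], [2]], [[3]]]))
#     # -> [[1, 3], [2, 3]]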
def get_peak_memory_in_kb():
try:
# This will only work on Linux systems.
with open("/proc/self/status") as status_file:
for line in status_file:
parts = line.split()
if parts[0] == "VmPeak:":
return int(parts[1])
except OSError:
pass
raise Warning("warning: could not determine peak memory")
| 1,127 | 35.387097 | 74 | py |
DAAISy | DAAISy-main/src/utils/translate/options.py | import argparse
import sys
def parse_args():
argparser = argparse.ArgumentParser()
argparser.add_argument(
"domain", help="path to domain pddl file")
argparser.add_argument(
"task", help="path to task pddl file")
argparser.add_argument(
"--relaxed", dest="generate_relaxed_task", action="store_true",
help="output relaxed task (no delete effects)")
argparser.add_argument(
"--full-encoding",
dest="use_partial_encoding", action="store_false",
help="By default we represent facts that occur in multiple "
"mutex groups only in one variable. Using this parameter adds "
"these facts to multiple variables. This can make the meaning "
"of the variables clearer, but increases the number of facts.")
argparser.add_argument(
"--invariant-generation-max-candidates", default=100000, type=int,
help="max number of candidates for invariant generation "
"(default: %(default)d). Set to 0 to disable invariant "
"generation and obtain only binary variables. The limit is "
"needed for grounded input files that would otherwise produce "
"too many candidates.")
argparser.add_argument(
"--sas-file", default="output.sas",
help="path to the SAS output file (default: %(default)s)")
argparser.add_argument(
"--invariant-generation-max-time", default=300, type=int,
help="max time for invariant generation (default: %(default)ds)")
argparser.add_argument(
"--add-implied-preconditions", action="store_true",
help="infer additional preconditions. This setting can cause a "
"severe performance penalty due to weaker relevance analysis "
"(see issue7).")
argparser.add_argument(
"--keep-unreachable-facts",
dest="filter_unreachable_facts", action="store_false",
help="keep facts that can't be reached from the initial state")
argparser.add_argument(
"--skip-variable-reordering",
dest="reorder_variables", action="store_false",
help="do not reorder variables based on the causal graph. Do not use "
"this option with the causal graph heuristic!")
argparser.add_argument(
"--keep-unimportant-variables",
dest="filter_unimportant_vars", action="store_false",
help="keep variables that do not influence the goal in the causal graph")
argparser.add_argument(
"--dump-task", action="store_true",
help="dump human-readable SAS+ representation of the task")
return argparser.parse_args()
def copy_args_to_module(args):
module_dict = sys.modules[__name__].__dict__
for key, value in vars(args).items():
module_dict[key] = value
def setup():
args = parse_args()
copy_args_to_module(args)
# [generalized-learning]: Disabled
# setup()
| 2,933 | 40.914286 | 81 | py |
DAAISy | DAAISy-main/src/utils/translate/simplify.py | """This module contains a function for simplifying tasks in
finite-domain representation (SASTask). Usage:
simplify.filter_unreachable_propositions(sas_task)
simplifies `sas_task` in-place. If simplification detects that the
task is unsolvable, the function raises `simplify.Impossible`. If it
detects that is has an empty goal, the function raises
`simplify.TriviallySolvable`.
The simplification procedure generates DTGs for the task and then
removes facts that are unreachable from the initial state in a DTG.
Note that such unreachable facts can exist even though we perform a
relaxed reachability analysis before grounding (and DTG reachability
is weaker than relaxed reachability) because the previous relaxed
reachability does not take into account any mutex information, while
PDDL-to-SAS conversion gets rid of certain operators that cannot be
applicable given the mutex information.
Despite the name, the method touches more than the set of facts. For
example, operators that have preconditions on pruned facts are
removed, too. (See also the docstring of
filter_unreachable_propositions.)
"""
from collections import defaultdict
from itertools import count
from . import sas_tasks
DEBUG = False
# TODO:
# This is all quite hackish and would be easier if the translator were
# restructured so that more information is immediately available for
# the propositions, and if propositions had more structure. Directly
# working with int pairs is awkward.
class DomainTransitionGraph:
"""Domain transition graphs.
Attributes:
- init (int): the initial state value of the DTG variable
- size (int): the number of values in the domain
- arcs (defaultdict: int -> set(int)): the DTG arcs (unlabeled)
There are no transition labels or goal values.
The intention is that nodes are represented as ints in {1, ...,
domain_size}, but this is not enforced.
For derived variables, the "fallback value" that is produced by
negation by failure should be used for `init`, so that it is
always considered reachable.
"""
def __init__(self, init, size):
"""Create a DTG with no arcs."""
self.init = init
self.size = size
self.arcs = defaultdict(set)
def add_arc(self, u, v):
"""Add an arc from u to v."""
self.arcs[u].add(v)
def reachable(self):
"""Return the values reachable from the initial value.
Represented as a set(int)."""
queue = [self.init]
reachable = set(queue)
while queue:
node = queue.pop()
new_neighbors = self.arcs.get(node, set()) - reachable
reachable |= new_neighbors
queue.extend(new_neighbors)
return reachable
def dump(self):
"""Dump the DTG."""
print("DTG size:", self.size)
print("DTG init value:", self.init)
print("DTG arcs:")
for source, destinations in sorted(self.arcs.items()):
for destination in sorted(destinations):
print(" %d => %d" % (source, destination))
def build_dtgs(task):
"""Build DTGs for all variables of the SASTask `task`.
Return a list(DomainTransitionGraph), one for each variable.
For derived variables, we do not consider the axiom bodies, i.e.,
we treat each axiom as if it were an operator with no
preconditions. In the case where the only derived variables used
are binary and all rules change the value from the default value
to the non-default value, this results in the correct DTG.
Otherwise, at worst it results in an overapproximation, which
would not threaten correctness."""
init_vals = task.init.values
sizes = task.variables.ranges
dtgs = [DomainTransitionGraph(init, size)
for (init, size) in zip(init_vals, sizes)]
def add_arc(var_no, pre_spec, post):
"""Add a DTG arc for var_no induced by transition pre_spec -> post.
pre_spec may be -1, in which case arcs from every value
other than post are added."""
if pre_spec == -1:
pre_values = set(range(sizes[var_no])).difference([post])
else:
pre_values = [pre_spec]
for pre in pre_values:
dtgs[var_no].add_arc(pre, post)
def get_effective_pre(var_no, conditions, effect_conditions):
"""Return combined information on the conditions on `var_no`
from operator conditions and effect conditions.
- conditions: dict(int -> int) containing the combined
operator prevail and preconditions
- effect_conditions: list(pair(int, int)) containing the
effect conditions
Result:
- -1 if there is no condition on var_no
- val if there is a unique condition var_no=val
- None if there are contradictory conditions on var_no"""
result = conditions.get(var_no, -1)
for cond_var_no, cond_val in effect_conditions:
if cond_var_no == var_no:
if result == -1:
# This is the first condition on var_no.
result = cond_val
elif cond_val != result:
# We have contradictory conditions on var_no.
return None
return result
for op in task.operators:
conditions = dict(op.get_applicability_conditions())
for var_no, _, post, cond in op.pre_post:
effective_pre = get_effective_pre(var_no, conditions, cond)
if effective_pre is not None:
add_arc(var_no, effective_pre, post)
for axiom in task.axioms:
var_no, val = axiom.effect
add_arc(var_no, -1, val)
return dtgs
always_false = object()
always_true = object()
class Impossible(Exception):
pass
class TriviallySolvable(Exception):
pass
class DoesNothing(Exception):
pass
class VarValueRenaming:
def __init__(self):
self.new_var_nos = [] # indexed by old var_no
self.new_values = [] # indexed by old var_no and old value
self.new_sizes = [] # indexed by new var_no
self.new_var_count = 0
self.num_removed_values = 0
def dump(self):
old_var_count = len(self.new_var_nos)
print("variable count: %d => %d" % (
old_var_count, self.new_var_count))
print("number of removed values: %d" % self.num_removed_values)
print("variable conversions:")
for old_var_no, (new_var_no, new_values) in enumerate(
zip(self.new_var_nos, self.new_values)):
old_size = len(new_values)
if new_var_no is None:
print("variable %d [size %d] => removed" % (
old_var_no, old_size))
else:
new_size = self.new_sizes[new_var_no]
print("variable %d [size %d] => %d [size %d]" % (
old_var_no, old_size, new_var_no, new_size))
for old_value, new_value in enumerate(new_values):
if new_value is always_false:
new_value = "always false"
elif new_value is always_true:
new_value = "always true"
print(" value %d => %s" % (old_value, new_value))
def register_variable(self, old_domain_size, init_value, new_domain):
assert 1 <= len(new_domain) <= old_domain_size
assert init_value in new_domain
if len(new_domain) == 1:
# Remove this variable completely.
new_values_for_var = [always_false] * old_domain_size
new_values_for_var[init_value] = always_true
self.new_var_nos.append(None)
self.new_values.append(new_values_for_var)
self.num_removed_values += old_domain_size
else:
new_value_counter = count()
new_values_for_var = []
for value in range(old_domain_size):
if value in new_domain:
new_values_for_var.append(next(new_value_counter))
else:
self.num_removed_values += 1
new_values_for_var.append(always_false)
new_size = next(new_value_counter)
assert new_size == len(new_domain)
self.new_var_nos.append(self.new_var_count)
self.new_values.append(new_values_for_var)
self.new_sizes.append(new_size)
self.new_var_count += 1
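    # Worked example (illustrative; the numbers are made up):
    # register_variable(3, 2, {0, 2}) keeps two of the three values: old
    # value 0 maps to new value 0, old value 1 becomes always_false
    # (removed), and old value 2 (the initial value) maps to new value 1,
    # so the new variable has size 2.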
def apply_to_task(self, task):
if DEBUG:
self.dump()
self.apply_to_variables(task.variables)
self.apply_to_mutexes(task.mutexes)
self.apply_to_init(task.init)
self.apply_to_goals(task.goal.pairs)
self.apply_to_operators(task.operators)
self.apply_to_axioms(task.axioms)
def apply_to_variables(self, variables):
variables.ranges = self.new_sizes
new_axiom_layers = [None] * self.new_var_count
for old_no, new_no in enumerate(self.new_var_nos):
if new_no is not None:
new_axiom_layers[new_no] = variables.axiom_layers[old_no]
assert None not in new_axiom_layers
variables.axiom_layers = new_axiom_layers
self.apply_to_value_names(variables.value_names)
def apply_to_value_names(self, value_names):
new_value_names = [[None] * size for size in self.new_sizes]
for var_no, values in enumerate(value_names):
for value, value_name in enumerate(values):
new_var_no, new_value = self.translate_pair((var_no, value))
if new_value is always_true:
if DEBUG:
print("Removed true proposition: %s" % value_name)
elif new_value is always_false:
if DEBUG:
print("Removed false proposition: %s" % value_name)
else:
new_value_names[new_var_no][new_value] = value_name
assert all((None not in value_names) for value_names in new_value_names)
value_names[:] = new_value_names
def apply_to_mutexes(self, mutexes):
new_mutexes = []
for mutex in mutexes:
new_facts = []
for var, val in mutex.facts:
new_var_no, new_value = self.translate_pair((var, val))
if (new_value is not always_true and
new_value is not always_false):
new_facts.append((new_var_no, new_value))
if len(new_facts) >= 2:
mutex.facts = new_facts
new_mutexes.append(mutex)
mutexes[:] = new_mutexes
def apply_to_init(self, init):
init_pairs = list(enumerate(init.values))
try:
self.convert_pairs(init_pairs)
except Impossible:
assert False, "Initial state impossible? Inconceivable!"
new_values = [None] * self.new_var_count
for new_var_no, new_value in init_pairs:
new_values[new_var_no] = new_value
assert None not in new_values
init.values = new_values
def apply_to_goals(self, goals):
# This may propagate Impossible up.
self.convert_pairs(goals)
if not goals:
# We raise an exception because we do not consider a SAS+
# task without goals well-formed. Our callers are supposed
# to catch this and replace the task with a well-formed
# trivially solvable task.
raise TriviallySolvable
def apply_to_operators(self, operators):
new_operators = []
num_removed = 0
for op in operators:
new_op = self.translate_operator(op)
if new_op is None:
num_removed += 1
if DEBUG:
print("Removed operator: %s" % op.name)
else:
new_operators.append(new_op)
print("%d operators removed" % num_removed)
operators[:] = new_operators
def apply_to_axioms(self, axioms):
new_axioms = []
num_removed = 0
for axiom in axioms:
try:
self.apply_to_axiom(axiom)
except (Impossible, DoesNothing):
num_removed += 1
if DEBUG:
print("Removed axiom:")
axiom.dump()
else:
new_axioms.append(axiom)
print("%d axioms removed" % num_removed)
axioms[:] = new_axioms
def translate_operator(self, op):
"""Compute a new operator from op where the var/value renaming has
been applied. Return None if op should be pruned (because it
is always inapplicable or has no effect.)"""
# We do not call this apply_to_operator, breaking the analogy
# with the other methods, because it creates a new operator
# rather than transforming in-place. The reason for this is
# that it would be quite difficult to generate the operator
# in-place.
# This method is trickier than it may at first appear. For
# example, pre_post values should be fully sorted (see
# documentation in the sas_tasks module), and pruning effect
# conditions from a conditional effects can break this sort
# order. Recreating the operator from scratch solves this
# because the pre_post entries are sorted by
# SASOperator.__init__.
# Also, when we detect a pre_post pair where the effect part
# can never trigger, the precondition part is still important,
# but may be demoted to a prevail condition. Whether or not
# this happens depends on the presence of other pre_post
# entries for the same variable. We solve this by computing
# the sorting into prevail vs. preconditions from scratch, too.
applicability_conditions = op.get_applicability_conditions()
try:
self.convert_pairs(applicability_conditions)
except Impossible:
# The operator is never applicable.
return None
conditions_dict = dict(applicability_conditions)
new_prevail_vars = set(conditions_dict)
new_pre_post = []
for entry in op.pre_post:
new_entry = self.translate_pre_post(entry, conditions_dict)
if new_entry is not None:
new_pre_post.append(new_entry)
# Mark the variable in the entry as not prevailed.
new_var = new_entry[0]
new_prevail_vars.discard(new_var)
if not new_pre_post:
# The operator has no effect.
return None
new_prevail = sorted(
(var, value)
for (var, value) in conditions_dict.items()
if var in new_prevail_vars)
return sas_tasks.SASOperator(
name=op.name, prevail=new_prevail, pre_post=new_pre_post,
cost=op.cost)
def apply_to_axiom(self, axiom):
# The following line may generate an Impossible exception,
# which is propagated up.
self.convert_pairs(axiom.condition)
new_var, new_value = self.translate_pair(axiom.effect)
# If the new_value is always false, then the condition must
# have been impossible.
assert new_value is not always_false
if new_value is always_true:
raise DoesNothing
axiom.effect = new_var, new_value
def translate_pre_post(self, pre_post_entry, conditions_dict):
"""Return a translated version of a pre_post entry.
If the entry never causes a value change, return None.
(It might seem that a possible precondition part of pre_post
gets lost in this case, but pre_post entries that become
prevail conditions are handled elsewhere.)
conditions_dict contains all applicability conditions
(prevail/pre) of the operator, already converted. This is
used to detect effect conditions that can never fire.
The method may assume that the operator remains reachable,
i.e., that it does not have impossible preconditions, as these
are already checked elsewhere.
Possible cases:
- effect is always_true => return None
- effect equals prevailed value => return None
- effect condition is impossible given operator applicability
condition => return None
- otherwise => return converted pre_post tuple
"""
var_no, pre, post, cond = pre_post_entry
new_var_no, new_post = self.translate_pair((var_no, post))
if new_post is always_true:
return None
if pre == -1:
new_pre = -1
else:
_, new_pre = self.translate_pair((var_no, pre))
assert new_pre is not always_false, (
"This function should only be called for operators "
"whose applicability conditions are deemed possible.")
if new_post == new_pre:
return None
new_cond = list(cond)
try:
self.convert_pairs(new_cond)
except Impossible:
# The effect conditions can never be satisfied.
return None
for cond_var, cond_value in new_cond:
if (cond_var in conditions_dict and
conditions_dict[cond_var] != cond_value):
# This effect condition is not compatible with
# the applicability conditions.
return None
assert new_post is not always_false, (
"if we survived so far, this effect can trigger "
"(as far as our analysis can determine this), "
"and then new_post cannot be always_false")
assert new_pre is not always_true, (
"if this pre_post changes the value and can fire, "
"new_pre cannot be always_true")
return new_var_no, new_pre, new_post, new_cond
def translate_pair(self, fact_pair):
(var_no, value) = fact_pair
new_var_no = self.new_var_nos[var_no]
new_value = self.new_values[var_no][value]
return new_var_no, new_value
def convert_pairs(self, pairs):
# We call this convert_... because it is an in-place method.
new_pairs = []
for pair in pairs:
new_var_no, new_value = self.translate_pair(pair)
if new_value is always_false:
raise Impossible
elif new_value is not always_true:
assert new_var_no is not None
new_pairs.append((new_var_no, new_value))
pairs[:] = new_pairs
def build_renaming(dtgs):
renaming = VarValueRenaming()
for dtg in dtgs:
renaming.register_variable(dtg.size, dtg.init, dtg.reachable())
return renaming
def filter_unreachable_propositions(sas_task):
"""We remove unreachable propositions and then prune variables
with only one value.
Examples of things that are pruned:
- Constant propositions that are not detected in instantiate.py
because instantiate.py only reasons at the predicate level, and some
predicates such as "at" in Depot are constant for some objects
(hoists), but not others (trucks).
Example: "at(hoist1, distributor0)" and the associated variable
in depots-01.
- "none of those" values that are unreachable.
Example: at(truck1, ?x) = <none of those> in depots-01.
- Certain values that are relaxed reachable but detected as
unreachable after SAS instantiation because the only operators
that set them have inconsistent preconditions.
Example: on(crate0, crate0) in depots-01.
"""
if DEBUG:
sas_task.validate()
dtgs = build_dtgs(sas_task)
renaming = build_renaming(dtgs)
# apply_to_task may raise Impossible if the goal is detected as
# unreachable or TriviallySolvable if it has no goal. We let the
# exceptions propagate to the caller.
renaming.apply_to_task(sas_task)
print("%d propositions removed" % renaming.num_removed_values)
if DEBUG:
sas_task.validate()
| 20,161 | 37.258065 | 80 | py |
DAAISy | DAAISy-main/src/utils/translate/constraints.py | import itertools
class NegativeClause:
# disjunction of inequalities
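    # Illustrative example (not from the original source):
    # NegativeClause([("?x", "?y"), ("?x", "a")]) stands for
    # (?x != ?y) or (?x != a).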
def __init__(self, parts):
self.parts = parts
assert len(parts)
def __str__(self):
disj = " or ".join(["(%s != %s)" % (v1, v2)
for (v1, v2) in self.parts])
return "(%s)" % disj
def is_satisfiable(self):
for part in self.parts:
if part[0] != part[1]:
return True
return False
def apply_mapping(self, m):
new_parts = [(m.get(v1, v1), m.get(v2, v2)) for (v1, v2) in self.parts]
return NegativeClause(new_parts)
class Assignment:
def __init__(self, equalities):
self.equalities = tuple(equalities)
# represents a conjunction of expressions ?x = ?y or ?x = d
# with ?x, ?y being variables and d being a domain value
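        # Illustrative example (not from the original source):
        # Assignment([("?x", "?y"), ("?y", "a")]) represents
        # (?x = ?y) and (?y = a); get_mapping() then maps both ?x and ?y
        # to the constant "a".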
self.consistent = None
self.mapping = None
self.eq_classes = None
def __str__(self):
conj = " and ".join(["(%s = %s)" % (v1, v2)
for (v1, v2) in self.equalities])
return "(%s)" % conj
def _compute_equivalence_classes(self):
eq_classes = {}
for (v1, v2) in self.equalities:
c1 = eq_classes.setdefault(v1, {v1})
c2 = eq_classes.setdefault(v2, {v2})
if c1 is not c2:
if len(c2) > len(c1):
v1, c1, v2, c2 = v2, c2, v1, c1
c1.update(c2)
for elem in c2:
eq_classes[elem] = c1
self.eq_classes = eq_classes
def _compute_mapping(self):
if not self.eq_classes:
self._compute_equivalence_classes()
# create mapping: each key is mapped to the smallest
# element in its equivalence class (with objects being
# smaller than variables)
mapping = {}
for eq_class in self.eq_classes.values():
variables = [item for item in eq_class if item.startswith("?")]
constants = [item for item in eq_class if not item.startswith("?")]
if len(constants) >= 2:
self.consistent = False
self.mapping = None
return
if constants:
set_val = constants[0]
else:
set_val = min(variables)
for entry in eq_class:
mapping[entry] = set_val
self.consistent = True
self.mapping = mapping
def is_consistent(self):
if self.consistent is None:
self._compute_mapping()
return self.consistent
def get_mapping(self):
if self.consistent is None:
self._compute_mapping()
return self.mapping
class ConstraintSystem:
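    # A conjunction of "combinatorial assignments" (each a disjunction of
    # Assignment objects) together with a set of negative clauses. The
    # system is solvable if one Assignment can be picked from every
    # disjunction such that the combined assignment is consistent and all
    # negative clauses remain satisfiable (see is_solvable).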
def __init__(self):
self.combinatorial_assignments = []
self.neg_clauses = []
def __str__(self):
combinatorial_assignments = []
for comb_assignment in self.combinatorial_assignments:
disj = " or ".join([str(assig) for assig in comb_assignment])
disj = "(%s)" % disj
combinatorial_assignments.append(disj)
assigs = " and\n".join(combinatorial_assignments)
neg_clauses = [str(clause) for clause in self.neg_clauses]
neg_clauses = " and ".join(neg_clauses)
return assigs + "(" + neg_clauses + ")"
def _all_clauses_satisfiable(self, assignment):
mapping = assignment.get_mapping()
for neg_clause in self.neg_clauses:
clause = neg_clause.apply_mapping(mapping)
if not clause.is_satisfiable():
return False
return True
def _combine_assignments(self, assignments):
new_equalities = []
for a in assignments:
new_equalities.extend(a.equalities)
return Assignment(new_equalities)
def add_assignment(self, assignment):
self.add_assignment_disjunction([assignment])
def add_assignment_disjunction(self, assignments):
self.combinatorial_assignments.append(assignments)
def add_negative_clause(self, negative_clause):
self.neg_clauses.append(negative_clause)
def combine(self, other):
"""Combines two constraint systems to a new system"""
combined = ConstraintSystem()
combined.combinatorial_assignments = (self.combinatorial_assignments +
other.combinatorial_assignments)
combined.neg_clauses = self.neg_clauses + other.neg_clauses
return combined
def copy(self):
other = ConstraintSystem()
other.combinatorial_assignments = list(self.combinatorial_assignments)
other.neg_clauses = list(self.neg_clauses)
return other
def dump(self):
print("AssignmentSystem:")
for comb_assignment in self.combinatorial_assignments:
disj = " or ".join([str(assig) for assig in comb_assignment])
print(" ASS: ", disj)
for neg_clause in self.neg_clauses:
print(" NEG: ", str(neg_clause))
def is_solvable(self):
"""Check whether the combinatorial assignments include at least
one consistent assignment under which the negative clauses
are satisfiable"""
for assignments in itertools.product(*self.combinatorial_assignments):
combined = self._combine_assignments(assignments)
if not combined.is_consistent():
continue
if self._all_clauses_satisfiable(combined):
return True
return False
| 5,591 | 33.518519 | 79 | py |
DAAISy | DAAISy-main/src/utils/translate/invariants.py | import itertools
from collections import defaultdict
from . import constraints
from . import pddl_fd as pddl
from . import tools
# Notes:
# All parts of an invariant always use all non-counted variables
# -> the arity of all predicates covered by an invariant is either the
# number of the invariant variables or this value + 1
#
# We currently keep the assumption that each predicate occurs at most once
# in every invariant.
def invert_list(alist):
result = defaultdict(list)
for pos, arg in enumerate(alist):
result[arg].append(pos)
return result
def instantiate_factored_mapping(pairs):
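    # Each entry of `pairs` is (preimage_positions, image_positions); every
    # permutation of the image positions gives one way of matching them up,
    # and the cartesian product combines one matching per entry into a
    # complete mapping.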
part_mappings = [[list(zip(preimg, perm_img)) for perm_img in itertools.permutations(img)]
for (preimg, img) in pairs]
return tools.cartesian_product(part_mappings)
def find_unique_variables(action, invariant):
# find unique names for invariant variables
params = {p.name for p in action.parameters}
for eff in action.effects:
params.update([p.name for p in eff.parameters])
inv_vars = []
counter = itertools.count()
for _ in range(invariant.arity()):
while True:
new_name = "?v%i" % next(counter)
if new_name not in params:
inv_vars.append(new_name)
break
return inv_vars
def get_literals(condition):
if isinstance(condition, pddl.Literal):
yield condition
elif isinstance(condition, pddl.Conjunction):
yield from condition.parts
def ensure_conjunction_sat(system, *parts):
"""Modifies the constraint system such that it is only solvable if the
conjunction of all parts is satisfiable.
Each part must be an iterator, generator, or an iterable over
literals."""
pos = defaultdict(set)
neg = defaultdict(set)
for literal in itertools.chain(*parts):
if literal.predicate == "=": # use (in)equalities in conditions
if literal.negated:
n = constraints.NegativeClause([literal.args])
system.add_negative_clause(n)
else:
a = constraints.Assignment([literal.args])
system.add_assignment_disjunction([a])
else:
if literal.negated:
neg[literal.predicate].add(literal)
else:
pos[literal.predicate].add(literal)
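    # A positive and a negative literal of the same predicate can only be
    # jointly satisfied if they differ in at least one argument position,
    # which is exactly what a NegativeClause over the paired arguments
    # expresses.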
for pred, posatoms in pos.items():
if pred in neg:
for posatom in posatoms:
for negatom in neg[pred]:
parts = list(zip(negatom.args, posatom.args))
if parts:
negative_clause = constraints.NegativeClause(parts)
system.add_negative_clause(negative_clause)
def ensure_cover(system, literal, invariant, inv_vars):
"""Modifies the constraint system such that it is only solvable if the
invariant covers the literal"""
a = invariant.get_covering_assignments(inv_vars, literal)
assert (len(a) == 1)
    # if invariants could contain several parts of one predicate, this would
    # not be true, but other parts of the code rely on this assumption
system.add_assignment_disjunction(a)
def ensure_inequality(system, literal1, literal2):
"""Modifies the constraint system such that it is only solvable if the
literal instantiations are not equal (ignoring whether one is negated and
the other is not)"""
if (literal1.predicate == literal2.predicate and
literal1.args):
parts = list(zip(literal1.args, literal2.args))
system.add_negative_clause(constraints.NegativeClause(parts))
class InvariantPart:
def __init__(self, predicate, order, omitted_pos=-1):
self.predicate = predicate
self.order = order
self.omitted_pos = omitted_pos
def __eq__(self, other):
# This implies equality of the omitted_pos component.
return self.predicate == other.predicate and self.order == other.order
def __ne__(self, other):
return self.predicate != other.predicate or self.order != other.order
def __le__(self, other):
return self.predicate <= other.predicate or self.order <= other.order
def __lt__(self, other):
return self.predicate < other.predicate or self.order < other.order
def __hash__(self):
return hash((self.predicate, tuple(self.order)))
def __str__(self):
var_string = " ".join(map(str, self.order))
omitted_string = ""
if self.omitted_pos != -1:
omitted_string = " [%d]" % self.omitted_pos
return "%s %s%s" % (self.predicate, var_string, omitted_string)
def arity(self):
return len(self.order)
def get_assignment(self, parameters, literal):
equalities = [(arg, literal.args[argpos])
for arg, argpos in zip(parameters, self.order)]
return constraints.Assignment(equalities)
def get_parameters(self, literal):
return [literal.args[pos] for pos in self.order]
def instantiate(self, parameters):
args = ["?X"] * (len(self.order) + (self.omitted_pos != -1))
for arg, argpos in zip(parameters, self.order):
args[argpos] = arg
return pddl.Atom(self.predicate, args)
def possible_mappings(self, own_literal, other_literal):
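        # Enumerate the ways to identify the argument positions of
        # other_literal with the invariant parameters used in own_literal,
        # allowing at most one argument position of other_literal to remain
        # uncovered (the counted position, encoded as -1 in the result).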
allowed_omissions = len(other_literal.args) - len(self.order)
if allowed_omissions not in (0, 1):
return []
own_parameters = self.get_parameters(own_literal)
arg_to_ordered_pos = invert_list(own_parameters)
other_arg_to_pos = invert_list(other_literal.args)
factored_mapping = []
for key, other_positions in other_arg_to_pos.items():
own_positions = arg_to_ordered_pos.get(key, [])
len_diff = len(own_positions) - len(other_positions)
            if (len_diff >= 1 or len_diff <= -2 or
                    (len_diff == -1 and not allowed_omissions)):
return []
if len_diff:
own_positions.append(-1)
allowed_omissions = 0
factored_mapping.append((other_positions, own_positions))
return instantiate_factored_mapping(factored_mapping)
def possible_matches(self, own_literal, other_literal):
assert self.predicate == own_literal.predicate
result = []
for mapping in self.possible_mappings(own_literal, other_literal):
new_order = [None] * len(self.order)
omitted = -1
for (key, value) in mapping:
if value == -1:
omitted = key
else:
new_order[value] = key
result.append(InvariantPart(other_literal.predicate, new_order, omitted))
return result
def matches(self, other, own_literal, other_literal):
return self.get_parameters(own_literal) == other.get_parameters(other_literal)
class Invariant:
# An invariant is a logical expression of the type
# forall V1...Vk: sum_(part in parts) weight(part, V1, ..., Vk) <= 1.
# k is called the arity of the invariant.
# A "part" is a symbolic fact only variable symbols in {V1, ..., Vk, X};
# the symbol X may occur at most once.
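    # Illustrative example with hypothetical predicates "at" and "in" from a
    # logistics-style domain: the invariant {at 0 [1], in 0 [1]} has arity 1
    # and states that each object is "at" or "in" at most one thing at any
    # time.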
def __init__(self, parts):
self.parts = frozenset(parts)
self.predicates = {part.predicate for part in parts}
self.predicate_to_part = {part.predicate: part for part in parts}
assert len(self.parts) == len(self.predicates)
def __eq__(self, other):
return self.parts == other.parts
def __ne__(self, other):
return self.parts != other.parts
def __lt__(self, other):
return self.parts < other.parts
def __le__(self, other):
return self.parts <= other.parts
def __hash__(self):
return hash(self.parts)
def __str__(self):
return "{%s}" % ", ".join(str(part) for part in self.parts)
def __repr__(self):
return '<Invariant %s>' % self
def arity(self):
return next(iter(self.parts)).arity()
def get_parameters(self, atom):
return self.predicate_to_part[atom.predicate].get_parameters(atom)
def instantiate(self, parameters):
return [part.instantiate(parameters) for part in self.parts]
def get_covering_assignments(self, parameters, atom):
part = self.predicate_to_part[atom.predicate]
return [part.get_assignment(parameters, atom)]
        # if there were several parts for the same predicate, the list
        # would contain more than one element
def check_balance(self, balance_checker, enqueue_func):
# Check balance for this hypothesis.
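        # The candidate is rejected if some action can add two covered facts
        # for the same invariant parameters at once ("too heavy") or can add
        # a covered fact without deleting a balancing one ("unbalanced"); in
        # the latter case refined candidates are enqueued.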
actions_to_check = set()
for part in self.parts:
actions_to_check |= balance_checker.get_threats(part.predicate)
for action in actions_to_check:
heavy_action = balance_checker.get_heavy_action(action)
if self.operator_too_heavy(heavy_action):
return False
if self.operator_unbalanced(action, enqueue_func):
return False
return True
def operator_too_heavy(self, h_action):
add_effects = [eff for eff in h_action.effects
if not eff.literal.negated and
self.predicate_to_part.get(eff.literal.predicate)]
inv_vars = find_unique_variables(h_action, self)
if len(add_effects) <= 1:
return False
for eff1, eff2 in itertools.combinations(add_effects, 2):
system = constraints.ConstraintSystem()
ensure_inequality(system, eff1.literal, eff2.literal)
ensure_cover(system, eff1.literal, self, inv_vars)
ensure_cover(system, eff2.literal, self, inv_vars)
ensure_conjunction_sat(system, get_literals(h_action.precondition),
get_literals(eff1.condition),
get_literals(eff2.condition),
[eff1.literal.negate()],
[eff2.literal.negate()])
if system.is_solvable():
return True
return False
def operator_unbalanced(self, action, enqueue_func):
inv_vars = find_unique_variables(action, self)
relevant_effs = [eff for eff in action.effects
if self.predicate_to_part.get(eff.literal.predicate)]
add_effects = [eff for eff in relevant_effs
if not eff.literal.negated]
del_effects = [eff for eff in relevant_effs
if eff.literal.negated]
for eff in add_effects:
if self.add_effect_unbalanced(action, eff, del_effects, inv_vars,
enqueue_func):
return True
return False
def minimal_covering_renamings(self, action, add_effect, inv_vars):
"""computes the minimal renamings of the action parameters such
that the add effect is covered by the action.
Each renaming is an constraint system"""
# add_effect must be covered
assigs = self.get_covering_assignments(inv_vars, add_effect.literal)
# renaming of operator parameters must be minimal
minimal_renamings = []
params = [p.name for p in action.parameters]
for assignment in assigs:
system = constraints.ConstraintSystem()
system.add_assignment(assignment)
mapping = assignment.get_mapping()
if len(params) > 1:
for (n1, n2) in itertools.combinations(params, 2):
if mapping.get(n1, n1) != mapping.get(n2, n2):
negative_clause = constraints.NegativeClause([(n1, n2)])
system.add_negative_clause(negative_clause)
minimal_renamings.append(system)
return minimal_renamings
def add_effect_unbalanced(self, action, add_effect, del_effects,
inv_vars, enqueue_func):
minimal_renamings = self.minimal_covering_renamings(action, add_effect,
inv_vars)
lhs_by_pred = defaultdict(list)
for lit in itertools.chain(get_literals(action.precondition),
get_literals(add_effect.condition),
get_literals(add_effect.literal.negate())):
lhs_by_pred[lit.predicate].append(lit)
for del_effect in del_effects:
minimal_renamings = self.unbalanced_renamings(
del_effect, add_effect, inv_vars, lhs_by_pred, minimal_renamings)
if not minimal_renamings:
return False
# Otherwise, the balance check fails => Generate new candidates.
self.refine_candidate(add_effect, action, enqueue_func)
return True
def refine_candidate(self, add_effect, action, enqueue_func):
"""refines the candidate for an add effect that is unbalanced in the
action and adds the refined one to the queue"""
part = self.predicate_to_part[add_effect.literal.predicate]
for del_eff in [eff for eff in action.effects if eff.literal.negated]:
if del_eff.literal.predicate not in self.predicate_to_part:
for match in part.possible_matches(add_effect.literal,
del_eff.literal):
enqueue_func(Invariant(self.parts.union((match,))))
def unbalanced_renamings(self, del_effect, add_effect, inv_vars,
lhs_by_pred, unbalanced_renamings):
"""returns the renamings from unbalanced renamings for which
the del_effect does not balance the add_effect."""
system = constraints.ConstraintSystem()
ensure_cover(system, del_effect.literal, self, inv_vars)
# Since we may only rename the quantified variables of the delete effect
# we need to check that "renamings" of constants are already implied by
        # the unbalanced_renaming (of the operator parameters). The
# following system is used as a helper for this. It builds a conjunction
# that formulates that the constants are NOT renamed accordingly. We
# below check that this is impossible with each unbalanced renaming.
check_constants = False
constant_test_system = constraints.ConstraintSystem()
for a, b in system.combinatorial_assignments[0][0].equalities:
# first 0 because the system was empty before we called ensure_cover
# second 0 because ensure_cover only adds assignments with one entry
if b[0] != "?":
check_constants = True
neg_clause = constraints.NegativeClause([(a, b)])
constant_test_system.add_negative_clause(neg_clause)
ensure_inequality(system, add_effect.literal, del_effect.literal)
still_unbalanced = []
for renaming in unbalanced_renamings:
if check_constants:
new_sys = constant_test_system.combine(renaming)
if new_sys.is_solvable():
# it is possible that the operator arguments are not
# mapped to constants as required for covering the delete
# effect
still_unbalanced.append(renaming)
continue
new_sys = system.combine(renaming)
if self.lhs_satisfiable(renaming, lhs_by_pred):
implies_system = self.imply_del_effect(del_effect, lhs_by_pred)
if not implies_system:
still_unbalanced.append(renaming)
continue
new_sys = new_sys.combine(implies_system)
if not new_sys.is_solvable():
still_unbalanced.append(renaming)
return still_unbalanced
def lhs_satisfiable(self, renaming, lhs_by_pred):
system = renaming.copy()
ensure_conjunction_sat(system, *itertools.chain(lhs_by_pred.values()))
return system.is_solvable()
def imply_del_effect(self, del_effect, lhs_by_pred):
"""returns a constraint system that is solvable if lhs implies
the del effect (only if lhs is satisfiable). If a solvable
lhs never implies the del effect, return None."""
# del_effect.cond and del_effect.atom must be implied by lhs
implies_system = constraints.ConstraintSystem()
for literal in itertools.chain(get_literals(del_effect.condition),
[del_effect.literal.negate()]):
poss_assignments = []
for match in lhs_by_pred[literal.predicate]:
if match.negated != literal.negated:
continue
else:
a = constraints.Assignment(list(zip(literal.args, match.args)))
poss_assignments.append(a)
if not poss_assignments:
return None
implies_system.add_assignment_disjunction(poss_assignments)
return implies_system
| 17,241 | 40.546988 | 94 | py |
DAAISy | DAAISy-main/src/utils/translate/greedy_join.py | import sys
from . import pddl_fd as pddl
from . import pddl_to_prolog
class OccurrencesTracker:
"""Keeps track of the number of times each variable appears
in a list of symbolic atoms."""
def __init__(self, rule):
self.occurrences = {}
self.update(rule.effect, +1)
for cond in rule.conditions:
self.update(cond, +1)
def update(self, symatom, delta):
for var in symatom.args:
if var[0] == "?":
if var not in self.occurrences:
self.occurrences[var] = 0
self.occurrences[var] += delta
assert self.occurrences[var] >= 0
if not self.occurrences[var]:
del self.occurrences[var]
def variables(self):
return set(self.occurrences)
class CostMatrix:
def __init__(self, joinees):
self.joinees = []
self.cost_matrix = []
for joinee in joinees:
self.add_entry(joinee)
def add_entry(self, joinee):
new_row = [self.compute_join_cost(joinee, other) for other in self.joinees]
self.cost_matrix.append(new_row)
self.joinees.append(joinee)
def delete_entry(self, index):
for row in self.cost_matrix[index + 1:]:
del row[index]
del self.cost_matrix[index]
del self.joinees[index]
def find_min_pair(self):
assert len(self.joinees) >= 2
min_cost = (sys.maxsize, sys.maxsize)
for i, row in enumerate(self.cost_matrix):
for j, entry in enumerate(row):
if entry < min_cost:
min_cost = entry
left_index, right_index = i, j
return left_index, right_index
def remove_min_pair(self):
left_index, right_index = self.find_min_pair()
left, right = self.joinees[left_index], self.joinees[right_index]
assert left_index > right_index
self.delete_entry(left_index)
self.delete_entry(right_index)
return (left, right)
def compute_join_cost(self, left_joinee, right_joinee):
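        # Join cost as a tuple (new variables on the smaller side, new
        # variables on the larger side, -shared variables); tuples compare
        # lexicographically, so joins introducing few new variables and
        # sharing many variables are preferred.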
left_vars = pddl_to_prolog.get_variables([left_joinee])
right_vars = pddl_to_prolog.get_variables([right_joinee])
if len(left_vars) > len(right_vars):
left_vars, right_vars = right_vars, left_vars
common_vars = left_vars & right_vars
return (len(left_vars) - len(common_vars),
len(right_vars) - len(common_vars),
-len(common_vars))
def can_join(self):
return len(self.joinees) >= 2
class ResultList:
def __init__(self, rule, name_generator):
self.final_effect = rule.effect
self.result = []
self.name_generator = name_generator
def get_result(self):
self.result[-1].effect = self.final_effect
return self.result
def add_rule(self, type, conditions, effect_vars):
effect = pddl.Atom(next(self.name_generator), effect_vars)
rule = pddl_to_prolog.Rule(conditions, effect)
rule.type = type
self.result.append(rule)
return rule.effect
def greedy_join(rule, name_generator):
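    # Repeatedly replace the two cheapest-to-join conditions by a fresh joint
    # condition, projecting each joinee to the variables that are still
    # needed (shared with the partner or occurring elsewhere in the rule),
    # until a single condition producing the rule effect is left.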
assert len(rule.conditions) >= 2
cost_matrix = CostMatrix(rule.conditions)
occurrences = OccurrencesTracker(rule)
result = ResultList(rule, name_generator)
while cost_matrix.can_join():
joinees = list(cost_matrix.remove_min_pair())
for joinee in joinees:
occurrences.update(joinee, -1)
common_vars = set(joinees[0].args) & set(joinees[1].args)
condition_vars = set(joinees[0].args) | set(joinees[1].args)
effect_vars = occurrences.variables() & condition_vars
for i, joinee in enumerate(joinees):
joinee_vars = set(joinee.args)
retained_vars = joinee_vars & (effect_vars | common_vars)
if retained_vars != joinee_vars:
joinees[i] = result.add_rule("project", [joinee], sorted(retained_vars))
joint_condition = result.add_rule("join", joinees, sorted(effect_vars))
cost_matrix.add_entry(joint_condition)
occurrences.update(joint_condition, +1)
# assert occurrences.variables() == set(rule.effect.args)
# for var in set(rule.effect.args):
# assert occurrences.occurrences[var] == 2 * rule.effect.args.count(var)
return result.get_result()
| 4,387 | 33.825397 | 88 | py |