Dataset schema (column, type, value range):

    file_path           string    lengths 20 to 207
    content             string    lengths 5 to 3.85M
    size                int64     5 to 3.85M
    lang                string    9 classes
    avg_line_length     float64   1.33 to 100
    max_line_length     int64     4 to 993
    alphanum_fraction   float64   0.26 to 0.93
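A minimal sketch of how rows with this schema can be filtered (it assumes the rows are already available as an iterable of dicts keyed by the columns above; the dataset's name and loader are not part of this dump):

    # Sketch only: `rows` is assumed to be an iterable of dicts with the
    # columns listed in the schema; no particular loader is implied.
    def iter_python_files(rows, max_size=100000):
        """Yield (file_path, content) for Python rows below a size cutoff."""
        for row in rows:
            if row["lang"] == "Python" and row["size"] <= max_size:
                yield row["file_path"], row["content"]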
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/common_setup.py
# -*- coding: utf-8 -*-

import itertools
import os
import platform
import subprocess
import sys

from lab.experiment import ARGPARSER
from lab import tools

from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport

from relativescatter import RelativeScatterPlotReport


def parse_args():
    ARGPARSER.add_argument(
        "--test", choices=["yes", "no", "auto"], default="auto",
        dest="test_run",
        help="test experiment locally on a small suite if --test=yes or "
             "--test=auto and we are not on a cluster")
    return ARGPARSER.parse_args()

ARGS = parse_args()


DEFAULT_OPTIMAL_SUITE = [
    'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
    'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
    'data-network-opt18-strips', 'depot', 'driverlog',
    'elevators-opt08-strips', 'elevators-opt11-strips',
    'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
    'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
    'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
    'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
    'openstacks-opt11-strips', 'openstacks-opt14-strips',
    'openstacks-strips', 'organic-synthesis-opt18-strips',
    'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
    'parcprinter-opt11-strips', 'parking-opt11-strips',
    'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
    'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
    'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
    'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
    'snake-opt18-strips', 'sokoban-opt08-strips',
    'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
    'termes-opt18-strips', 'tetris-opt14-strips',
    'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
    'transport-opt08-strips', 'transport-opt11-strips',
    'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
    'visitall-opt14-strips', 'woodworking-opt08-strips',
    'woodworking-opt11-strips', 'zenotravel']

DEFAULT_SATISFICING_SUITE = [
    'agricola-sat18-strips', 'airport', 'assembly',
    'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
    'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
    'childsnack-sat14-strips', 'citycar-sat14-adl',
    'data-network-sat18-strips', 'depot', 'driverlog',
    'elevators-sat08-strips', 'elevators-sat11-strips',
    'flashfill-sat18-adl', 'floortile-sat11-strips',
    'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
    'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
    'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
    'miconic-simpleadl', 'movie', 'mprime', 'mystery',
    'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
    'openstacks-sat08-adl', 'openstacks-sat08-strips',
    'openstacks-sat11-strips', 'openstacks-sat14-strips',
    'openstacks-strips', 'optical-telegraphs',
    'organic-synthesis-sat18-strips',
    'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
    'parcprinter-sat11-strips', 'parking-sat11-strips',
    'parking-sat14-strips', 'pathways', 'pathways-noneg',
    'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
    'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
    'psr-middle', 'psr-small', 'rovers', 'satellite',
    'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
    'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
    'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
    'termes-sat18-strips', 'tetris-sat14-strips',
    'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
    'transport-sat08-strips', 'transport-sat11-strips',
    'transport-sat14-strips', 'trucks', 'trucks-strips',
    'visitall-sat11-strips', 'visitall-sat14-strips',
    'woodworking-sat08-strips', 'woodworking-sat11-strips',
    'zenotravel']


def get_script():
    """Get file name of main script."""
    return tools.get_script_path()


def get_script_dir():
    """Get directory of main script.

    Usually a relative directory (depends on how it was called by the
    user.)"""
    return os.path.dirname(get_script())


def get_experiment_name():
    """Get name for experiment.

    Derived from the absolute filename of the main script, e.g.
    "/ham/spam/eggs.py" => "spam-eggs"."""
    script = os.path.abspath(get_script())
    script_dir = os.path.basename(os.path.dirname(script))
    script_base = os.path.splitext(os.path.basename(script))[0]
    return "%s-%s" % (script_dir, script_base)


def get_data_dir():
    """Get data dir for the experiment.

    This is the subdirectory "data" of the directory containing
    the main script."""
    return os.path.join(get_script_dir(), "data", get_experiment_name())


def get_repo_base():
    """Get base directory of the repository, as an absolute path.

    Search upwards in the directory tree from the main script until a
    directory with a subdirectory named ".hg" is found.

    Abort if the repo base cannot be found."""
    path = os.path.abspath(get_script_dir())
    while os.path.dirname(path) != path:
        if os.path.exists(os.path.join(path, ".hg")):
            return path
        path = os.path.dirname(path)
    sys.exit("repo base could not be found")


def is_running_on_cluster():
    node = platform.node()
    return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")


def is_test_run():
    return ARGS.test_run == "yes" or (
        ARGS.test_run == "auto" and not is_running_on_cluster())


def get_algo_nick(revision, config_nick):
    return "{revision}-{config_nick}".format(**locals())


class IssueConfig(object):
    """Hold information about a planner configuration.

    See FastDownwardExperiment.add_algorithm() for documentation of the
    constructor's options.

    """
    def __init__(self, nick, component_options,
                 build_options=None, driver_options=None):
        self.nick = nick
        self.component_options = component_options
        self.build_options = build_options
        self.driver_options = driver_options


class IssueExperiment(FastDownwardExperiment):
    """Subclass of FastDownwardExperiment with some convenience features."""

    DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]

    DEFAULT_TABLE_ATTRIBUTES = [
        "cost",
        "coverage",
        "error",
        "evaluations",
        "expansions",
        "expansions_until_last_jump",
        "generated",
        "memory",
        "planner_memory",
        "planner_time",
        "quality",
        "run_dir",
        "score_evaluations",
        "score_expansions",
        "score_generated",
        "score_memory",
        "score_search_time",
        "score_total_time",
        "search_time",
        "total_time",
    ]

    DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
        "evaluations",
        "expansions",
        "expansions_until_last_jump",
        "initial_h_value",
        "memory",
        "search_time",
        "total_time",
    ]

    PORTFOLIO_ATTRIBUTES = [
        "cost",
        "coverage",
        "error",
        "plan_length",
        "run_dir",
    ]

    def __init__(self, revisions=None, configs=None, path=None, **kwargs):
        """

        You can either specify both *revisions* and *configs* or none
        of them. If they are omitted, you will need to call
        exp.add_algorithm() manually.

        If *revisions* is given, it must be a non-empty list of
        revision identifiers, which specify which planner versions to
        use in the experiment. The same versions are used for
        translator, preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        If *configs* is given, it must be a non-empty list of
        IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        """
        path = path or get_data_dir()
        FastDownwardExperiment.__init__(self, path=path, **kwargs)
        if (revisions and not configs) or (not revisions and configs):
            raise ValueError(
                "please provide either both or none of revisions and configs")
        for rev in revisions:
            for config in configs:
                self.add_algorithm(
                    get_algo_nick(rev, config.nick),
                    get_repo_base(),
                    rev,
                    config.component_options,
                    build_options=config.build_options,
                    driver_options=config.driver_options)
        self._revisions = revisions
        self._configs = configs

    @classmethod
    def _is_portfolio(cls, config_nick):
        return "fdss" in config_nick

    @classmethod
    def get_supported_attributes(cls, config_nick, attributes):
        if cls._is_portfolio(config_nick):
            return [attr for attr in attributes
                    if attr in cls.PORTFOLIO_ATTRIBUTES]
        return attributes

    def add_absolute_report_step(self, **kwargs):
        """Add step that makes an absolute report.

        Absolute reports are useful for experiments that don't compare
        revisions.

        The report is written to the experiment evaluation directory.

        All *kwargs* will be passed to the AbsoluteReport class. If the
        keyword argument *attributes* is not specified, a default list
        of attributes is used. ::

            exp.add_absolute_report_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        report = AbsoluteReport(**kwargs)
        outfile = os.path.join(
            self.eval_dir,
            get_experiment_name() + "." + report.output_format)
        self.add_report(report, outfile=outfile)
        self.add_step(
            'publish-absolute-report', subprocess.call, ['publish', outfile])

    def add_comparison_table_step(self, **kwargs):
        """Add a step that makes pairwise revision comparisons.

        Create comparative reports for all pairs of Fast Downward
        revisions. Each report pairs up the runs of the same config and
        lists the two absolute attribute values and their difference
        for all attributes in kwargs["attributes"].

        All *kwargs* will be passed to the CompareConfigsReport class.
        If the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_comparison_table_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)

        def make_comparison_tables():
            for rev1, rev2 in itertools.combinations(self._revisions, 2):
                compared_configs = []
                for config in self._configs:
                    config_nick = config.nick
                    compared_configs.append(
                        ("%s-%s" % (rev1, config_nick),
                         "%s-%s" % (rev2, config_nick),
                         "Diff (%s)" % config_nick))
                report = ComparativeReport(compared_configs, **kwargs)
                outfile = os.path.join(
                    self.eval_dir,
                    "%s-%s-%s-compare.%s" % (
                        self.name, rev1, rev2, report.output_format))
                report(self.eval_dir, outfile)

        def publish_comparison_tables():
            for rev1, rev2 in itertools.combinations(self._revisions, 2):
                outfile = os.path.join(
                    self.eval_dir,
                    "%s-%s-%s-compare.html" % (self.name, rev1, rev2))
                subprocess.call(["publish", outfile])

        self.add_step("make-comparison-tables", make_comparison_tables)
        self.add_step(
            "publish-comparison-tables", publish_comparison_tables)

    def add_scatter_plot_step(self, relative=False, attributes=None):
        """Add step creating (relative) scatter plots for all revision pairs.

        Create a scatter plot for each combination of attribute,
        configuration and revisions pair. If *attributes* is not
        specified, a list of common scatter plot attributes is used.
        For portfolios all attributes except "cost", "coverage" and
        "plan_length" will be ignored. ::

            exp.add_scatter_plot_step(attributes=["expansions"])

        """
        if relative:
            report_class = RelativeScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
            step_name = "make-relative-scatter-plots"
        else:
            report_class = ScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
            step_name = "make-absolute-scatter-plots"
        if attributes is None:
            attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES

        def make_scatter_plot(config_nick, rev1, rev2, attribute):
            name = "-".join([self.name, rev1, rev2, attribute, config_nick])
            print "Make scatter plot for", name
            algo1 = "{}-{}".format(rev1, config_nick)
            algo2 = "{}-{}".format(rev2, config_nick)
            report = report_class(
                filter_config=[algo1, algo2],
                attributes=[attribute],
                get_category=lambda run1, run2: run1["domain"],
                legend_location=(1.3, 0.5))
            report(
                self.eval_dir,
                os.path.join(scatter_dir, rev1 + "-" + rev2, name))

        def make_scatter_plots():
            for config in self._configs:
                for rev1, rev2 in itertools.combinations(self._revisions, 2):
                    for attribute in self.get_supported_attributes(
                            config.nick, attributes):
                        make_scatter_plot(config.nick, rev1, rev2, attribute)

        self.add_step(step_name, make_scatter_plots)

size: 14,786 | lang: Python | avg_line_length: 36.435443 | max_line_length: 82 | alphanum_fraction: 0.618355
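To make the naming conventions above concrete, here is a small self-contained sketch (the script path and nicks are invented for illustration; common_setup derives the same strings via get_experiment_name() and get_algo_nick()):

    import os

    def experiment_name_for(script_path):
        # Mirrors get_experiment_name(): "<parent dir>-<script basename>".
        script = os.path.abspath(script_path)
        script_dir = os.path.basename(os.path.dirname(script))
        script_base = os.path.splitext(os.path.basename(script))[0]
        return "%s-%s" % (script_dir, script_base)

    # Hypothetical inputs:
    print(experiment_name_for("experiments/issue814/v6.py"))  # "issue814-v6"
    print("%s-%s" % ("issue814-v5", "release32-ff"))  # algo nick "issue814-v5-release32-ff"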
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v6.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v5", "issue814-v6"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "5m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,252 | lang: Python | avg_line_length: 31.185714 | max_line_length: 108 | alphanum_fraction: 0.678508
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v15.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v13", "issue814-v14", "issue814-v15"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["ff"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,244 | lang: Python | avg_line_length: 30.619718 | max_line_length: 94 | alphanum_fraction: 0.676471
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v10-base-30min.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-base", "issue814-v10"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,256 | lang: Python | avg_line_length: 31.242857 | max_line_length: 108 | alphanum_fraction: 0.679078
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v17-hmax.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v12-base", "issue814-v16", "issue814-v17"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "astar(h)"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["hmax"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,243 | lang: Python | avg_line_length: 30.605633 | max_line_length: 94 | alphanum_fraction: 0.677218
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v16-hmax.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v15", "issue814-v16"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "astar(h)"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["hmax"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,222 | lang: Python | avg_line_length: 30.309859 | max_line_length: 94 | alphanum_fraction: 0.676868
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v11-base-hmax-30min.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-base", "issue814-v11"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "astar(h)"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["hmax"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,234 | lang: Python | avg_line_length: 30.478873 | max_line_length: 94 | alphanum_fraction: 0.676813
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-base", "issue814-v1"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,255 | lang: Python | avg_line_length: 31.228571 | max_line_length: 108 | alphanum_fraction: 0.678936
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v9.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v8", "issue814-v9"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "5m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,252 | lang: Python | avg_line_length: 31.185714 | max_line_length: 108 | alphanum_fraction: 0.678508
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v15-hadd.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v13", "issue814-v14", "issue814-v15"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,249 | lang: Python | avg_line_length: 30.69014 | max_line_length: 94 | alphanum_fraction: 0.67719
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v16-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v15", "issue814-v16"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,239 | lang: Python | avg_line_length: 30.549295 | max_line_length: 94 | alphanum_fraction: 0.676195
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v13.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v12", "issue814-v13"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,235 | lang: Python | avg_line_length: 30.492957 | max_line_length: 94 | alphanum_fraction: 0.675615
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v1_7_10.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

"""This experiment is only here for completeness because it was
referred to on the tracker. Don't use it as given here: the different
experiments that are aggregated here used different timeouts, so the
results will be misleading."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-base", "issue814-v1", "issue814-v7", "issue814-v10"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

## The following steps are to actually run the experiment.
# exp.add_step('build', exp.build)
# exp.add_step('start', exp.start_runs)
# exp.add_fetcher(name='fetch')

## Alternatively, the following steps fetch the results from the other
## experiments.
exp.add_fetcher('data/issue814-v1-eval')
exp.add_fetcher('data/issue814-v7-eval')
exp.add_fetcher('data/issue814-v10-eval')

exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,794 | lang: Python | avg_line_length: 33.506172 | max_line_length: 108 | alphanum_fraction: 0.696492
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            # String exceptions are invalid; raise a proper exception type.
            raise ValueError("output format 'tex' is not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')

size: 3,875 | lang: Python | avg_line_length: 35.566037 | max_line_length: 78 | alphanum_fraction: 0.59871
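To make the relative mapping in _fill_categories concrete, here is a tiny worked example (the numbers are invented for illustration):

    # Invented values: on one task, algorithm 1 takes 10 s and
    # algorithm 2 takes 5 s for the plotted attribute.
    val1, val2 = 10.0, 5.0
    x = val1                # x-axis: absolute value of algorithm 1
    y = val2 / float(val1)  # y-axis: value of algorithm 2 relative to algorithm 1
    # x == 10.0, y == 0.5: the point lies below 1, meaning algorithm 2
    # was twice as fast here; points above 1 would favor algorithm 1.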
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v15-hmax.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v13", "issue814-v14", "issue814-v15"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "astar(h)"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["hmax"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,238 | lang: Python | avg_line_length: 30.535211 | max_line_length: 94 | alphanum_fraction: 0.676944
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v3.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v2", "issue814-v3"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "5m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,252 | lang: Python | avg_line_length: 31.185714 | max_line_length: 108 | alphanum_fraction: 0.678508
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/align-hmax.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v14", "issue814-v16"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "astar(h)"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["hmax"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,222 | lang: Python | avg_line_length: 30.309859 | max_line_length: 94 | alphanum_fraction: 0.676868
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/align-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v14", "issue814-v16"]
if common_setup.is_test_run():
    BUILDS = ["release64"]
else:
    BUILDS = ["release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,239 | lang: Python | avg_line_length: 30.549295 | max_line_length: 94 | alphanum_fraction: 0.676195
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue814/v2.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-v1", "issue814-v2"]
if common_setup.is_test_run():
    BUILDS = ["release32"]
else:
    BUILDS = ["debug64", "release32", "release64"]
CONFIGS = [
    IssueConfig(
        build + "-{heuristic}".format(**locals()),
        ["--evaluator", "h={heuristic}()".format(**locals()),
         "--search", "lazy_greedy([h],preferred=[h])"],
        build_options=[build],
        driver_options=["--build", build, "--overall-time-limit", "30m"])
    for build in BUILDS
    for heuristic in ["add", "ff"]
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2", email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
exp.add_comparison_table_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
    ["simplify_before", "simplify_after", "simplify_time"])

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick)
                                  for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain")),
            outfile="{}-{}-{}-{}-{}.png".format(
                exp.name, attribute, config.nick, *REVISIONS))

exp.run_steps()

size: 2,253 | lang: Python | avg_line_length: 31.2 | max_line_length: 108 | alphanum_fraction: 0.678651
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue990/v1-satisficing.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import common_setup

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

REVISIONS = [
    "issue990-base",
    "issue990-v1",
]

CONFIGS = [
    common_setup.IssueConfig(
        "lama-first", [],
        driver_options=["--alias", "lama-first"]),
    common_setup.IssueConfig(
        "lm-zg",
        ["--search", "eager_greedy([lmcount(lm_zg(reasonable_orders=false))])"]),
]

BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REPO = os.environ["DOWNWARD_REPO"]

if common_setup.is_running_on_cluster():
    SUITE = common_setup.DEFAULT_SATISFICING_SUITE
    ENVIRONMENT = BaselSlurmEnvironment(
        partition="infai_2",
        email="[email protected]",
        export=["PATH", "DOWNWARD_BENCHMARKS"],
    )
else:
    SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=2)

exp = common_setup.IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)

exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")

exp.add_absolute_report_step()
exp.add_parse_again_step()

exp.run_steps()

size: 1,393 | lang: Python | avg_line_length: 23.892857 | max_line_length: 111 | alphanum_fraction: 0.693467
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue990/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 
'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".git" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".git")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the ComparativeReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used.
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) if config_nick2 is not None: name += "-" + config_nick2 print("Make scatter plot for", name) algo1 = get_algo_nick(rev1, config_nick) algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) report = ScatterPlotReport( filter_algorithm=[algo1, algo2], attributes=[attribute], relative=relative, get_category=lambda run1, run2: run1["domain"]) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) for nick1, nick2, rev1, rev2, attribute in additional: make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) self.add_step(step_name, make_scatter_plots)
14,979
Python
36.828283
94
0.618065
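Note: the add_scatter_plot_step() method in the common_setup.py above accepts an *additional* parameter that its docstring does not document; each entry compares two different config nicks across a revision pair. Below is a minimal sketch of its use, assuming an already built IssueExperiment `exp`; the revision and config names are illustrative, not taken from a real experiment script:

# Sketch only: each *additional* tuple has the form
# (config_nick1, config_nick2, rev1, rev2, attribute) and yields one extra
# plot of algorithm "rev1-config_nick1" against "rev2-config_nick2".
exp.add_scatter_plot_step(
    relative=True,
    additional=[
        ("seq-opt-bjolp", "lm-exhaust", "issue814-base", "issue814-v1",
         "total_time"),
    ])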
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue990/v1-optimal-2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import common_setup import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment REVISIONS = [ "issue990-base", "issue990-v1", ] CONFIGS = [ common_setup.IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), ] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REPO = os.environ["DOWNWARD_REPO"] if common_setup.is_running_on_cluster(): SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) else: SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_parse_again_step() exp.run_steps()
1,761
Python
29.37931
191
0.68711
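The CONFIGS in v1-optimal-2.py above all follow one pattern: the landmark-count heuristic is bound to the name "lmc" once via --evaluator and then handed to astar() both as the main evaluator and as lazy_evaluator, so the same heuristic object is reused for lazy re-evaluation rather than constructed twice. A hedged sketch of the equivalent direct driver call; the driver path and PDDL file names are placeholders:

# Sketch only: mirrors the "lm-hm2" configuration above as a standalone
# Fast Downward invocation (assumes the usual fast-downward.py driver).
import subprocess

subprocess.call([
    "./fast-downward.py", "domain.pddl", "problem.pddl",
    "--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)",
    "--search", "astar(lmc,lazy_evaluator=lmc)",
])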
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue990/v1-satisficing-2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import common_setup import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment REVISIONS = [ "issue990-base", "issue990-v1", ] CONFIGS = [ common_setup.IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), common_setup.IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg(reasonable_orders=false))])"]), ] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REPO = os.environ["DOWNWARD_REPO"] if common_setup.is_running_on_cluster(): SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) else: SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_parse_again_step() exp.run_steps()
1,736
Python
28.948275
310
0.697005
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue990/v1-optimal.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import common_setup import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment REVISIONS = [ "issue990-base", "issue990-v1", ] CONFIGS = [ common_setup.IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), common_setup.IssueConfig("lm-exhaust", ["--search", "astar(lmcount(lm_exhaust(), admissible=true))"]), ] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REPO = os.environ["DOWNWARD_REPO"] if common_setup.is_running_on_cluster(): SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) else: SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_absolute_report_step() exp.add_parse_again_step() exp.run_steps()
1,348
Python
23.527272
106
0.689169
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue456/sat-v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue456-base", "issue456-v2"] LIMITS = {"search_time": 1800} SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = { "eager_greedy_add": [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"], "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
650
Python
18.147058
45
0.573846
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue456/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,755
Python
35.135977
79
0.610349
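Unlike the newer helpers, the issue456 common_setup.py above wraps the old DownwardExperiment API: *configs* is a dict of nick/command-line pairs, and *search_revisions* shares the translator and preprocessor of the first revision across all runs while varying only the search component. A minimal sketch of a script against this API, mirroring the opt-v1.py and sat-v1.py files below; the suite here is illustrative:

# Sketch only: both runs reuse the "issue456-base" translator/preprocessor;
# only the search code differs between the two combinations.
import common_setup

exp = common_setup.IssueExperiment(
    search_revisions=["issue456-base", "issue456-v1"],
    configs={"astar_blind": ["--search", "astar(blind())"]},
    suite=["gripper:prob01.pddl"],
)
exp.add_comparison_table_step()
exp()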
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue456/opt-v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue456-base", "issue456-v1"] LIMITS = {"search_time": 1800} SUITE = suites.suite_optimal_with_ipc11() CONFIGS = { "astar_blind": ["--search", "astar(blind())"], "astar_hmax": ["--search", "astar(hmax())"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
492
Python
17.961538
50
0.636179
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue456/sat-v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue456-base", "issue456-v1"] LIMITS = {"search_time": 1800} SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = { "eager_greedy_add": [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"], "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
650
Python
18.147058
45
0.573846
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue456/opt-v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue456-base", "issue456-v2"] LIMITS = {"search_time": 1800} SUITE = suites.suite_optimal_with_ipc11() CONFIGS = { "astar_blind": ["--search", "astar(blind())"], "astar_hmax": ["--search", "astar(hmax())"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
492
Python
17.961538
50
0.636179
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue740/common.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["8e0b8e1f6edc"] EXPORTS = ["PYTHONPATH", "PATH", "DOWNWARD_BENCHMARKS"] def generate_configs(sas_filenames): configs = [] for sas_file in sas_filenames: common_driver_options = [] if sas_file is None else ["--sas-file", sas_file] configs += [ IssueConfig('lazy-greedy-blind-{}'.format(sas_file), ['--search', 'lazy_greedy([blind()])'], driver_options=common_driver_options), IssueConfig('lama-first-{}'.format(sas_file), [], driver_options=common_driver_options + ["--alias", "lama-first"]), IssueConfig("seq_sat_fdss_1-{}".format(sas_file), [], driver_options=common_driver_options + ["--alias", "seq-sat-fdss-1"]), IssueConfig("seq_sat_fdss_2-{}".format(sas_file), [], driver_options=common_driver_options + ["--portfolio", "driver/portfolios/seq_sat_fdss_2.py", "--overall-time-limit", "20s"]), IssueConfig('translate-only-{}'.format(sas_file), [], driver_options=['--translate'] + common_driver_options), ] return configs def generate_experiments(configs): SUITE = ["gripper:prob01.pddl", "blocks:probBLOCKS-5-0.pddl", "visitall-sat11-strips:problem12.pddl", "airport:p01-airport1-p1.pddl"] ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=EXPORTS) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = IssueExperiment( revisions=REVISIONS, configs=configs, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.run_steps()
2,196
Python
33.328124
117
0.59745
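To make the shape of generate_configs() concrete, here is a hedged sketch of what it returns for a single hypothetical SAS file name (cf. foobar_sas.py below); it assumes the DOWNWARD_BENCHMARKS variable that common.py reads at import time is set:

# Sketch only: five IssueConfig objects per input name, with the file name
# embedded in each nick and "--sas-file" prepended to the driver options
# (the translate-only config appends it after "--translate" instead).
import common

configs = common.generate_configs(["foobar.sas"])
assert len(configs) == 5
assert configs[1].nick == "lama-first-foobar.sas"
assert configs[1].driver_options == [
    "--sas-file", "foobar.sas", "--alias", "lama-first"]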
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue740/out_sas.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import common # We want to test both NOT specifying the --sas-file option AND specifying the default "output.sas" value. # The result should be the same in both cases. common.generate_experiments(common.generate_configs((None, "output.sas")))
289
Python
35.249996
105
0.737024
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue740/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. 
Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the ComparativeReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored.
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: from relativescatter import RelativeScatterPlotReport report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,182
Python
35.838961
79
0.613383
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue740/foobar_sas.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import common common.generate_experiments(common.generate_configs(["foobar.sas"]))
132
Python
17.999997
68
0.69697
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue526/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the ComparativeReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored.
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,153
Python
35.955613
82
0.615205
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue526/v1-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue526-base", "issue526-v1"] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) CONFIGS = [ IssueConfig( "ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"], driver_options=["--overall-time-limit", "5m"]), IssueConfig( "lama-first-lazy", [], driver_options=["--alias", "lama-first", "--overall-time-limit", "5m"]), IssueConfig( "lama-first-eager", ["--evaluator", """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))""", "--evaluator", "hff=ff_synergy(hlm)", "--search", """eager_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one)"""], driver_options=["--overall-time-limit", "5m"]), ] if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() exp.add_comparison_table_step() for attribute in ["memory", "total_time"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain")), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) exp.run_steps()
2,384
Python
29.974026
94
0.634228
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue526/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the ratio of algorithm 2's value to algorithm 1's value on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("output format 'tex' is not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
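RelativeScatterPlotReport plots algorithm 1's raw value on the x-axis and the ratio val2/val1 on the y-axis, then widens the y-limits so they are symmetric around 1. A self-contained sketch with made-up values:

# (val1, val2) per task -- assumed numbers, not experiment data
pairs = [(10.0, 5.0), (4.0, 8.0), (7.0, 7.0)]
points = [(val1, val2 / float(val1)) for val1, val2 in pairs]
print(points)  # [(10.0, 0.5), (4.0, 2.0), (7.0, 1.0)]
# Center the y-limits around 1, mirroring _fill_categories:
ylim_bottom = min([2.0] + [y for _, y in points])
ylim_top = max([0.5] + [y for _, y in points])
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))
print(ylim_bottom, ylim_top)  # 0.5 2.0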
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print("Make scatter plot for", name) algo1 = get_algo_nick(rev1, config_nick) algo2 = get_algo_nick(rev2, config_nick) report = report_class( filter_algorithm=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"]) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,744
Python
36.423858
82
0.619778
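IssueExperiment registers one algorithm per (revision, config) pair via get_algo_nick, and add_comparison_table_step later pairs the same config nick across every revision pair. A short sketch of that grid (revision and nick values assumed for illustration):

import itertools

revisions = ["issue939-base", "issue939-v1"]
config_nicks = ["translate-only"]
algos = ["{}-{}".format(rev, nick)
         for rev in revisions for nick in config_nicks]
print(algos)  # ['issue939-base-translate-only', 'issue939-v1-translate-only']
for rev1, rev2 in itertools.combinations(revisions, 2):
    for nick in config_nicks:
        print("compare {}-{} against {}-{}".format(rev1, nick, rev2, nick))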
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/base.py
#! /usr/bin/env python2 # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue939-base"] CONFIGS = [ IssueConfig( "translate-only", [], driver_options=["--translate"]) ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]") # This was generated by running "./suites.py all" in the benchmarks # repository. SUITE = [ 'agricola-opt18-strips', 'agricola-sat18-strips', 'airport', 'airport-adl', 'assembly', 'barman-mco14-strips', 'barman-opt11-strips', 'barman-opt14-strips', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-opt18-adl', 'caldera-sat18-adl', 'caldera-split-opt18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-opt14-strips', 'childsnack-sat14-strips', 'citycar-opt14-adl', 'citycar-sat14-adl', 'data-network-opt18-strips', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-opt11-strips', 'floortile-opt14-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-opt14-strips', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-agl14-strips', 'hiking-opt14-strips', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-opt14-adl', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'no-mprime', 'no-mystery', 'nomystery-opt11-strips', 'nomystery-sat11-strips', 'nurikabe-opt18-adl', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-agl14-strips', 'openstacks-opt08-adl', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-opt18-strips', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-opt18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parcprinter-sat11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pegsol-sat11-strips', 'petri-net-alignment-opt18-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-opt18-adl', 'settlers-sat18-adl', 'snake-opt18-strips', 'snake-sat18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-opt18-strips', 'spider-sat18-strips', 'storage', 'termes-opt18-strips', 'termes-sat18-strips', 'tetris-opt14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'thoughtful-sat14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-opt08-strips', 
'woodworking-opt11-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel', ] if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser("translator_additional_parser.py") del exp.commands['remove-output-sas'] exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_parse_again_step() exp.add_fetcher(name='fetch') exp.run_steps()
4,843
Python
24.361256
68
0.655585
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/fetch.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from collections import defaultdict import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.experiment import Experiment from downward.reports import PlanningReport from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport import common_setup DIR = os.path.dirname(os.path.abspath(__file__)) ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]") if common_setup.is_test_run(): ENVIRONMENT = LocalEnvironment(processes=4) exp = Experiment() class TranslatorDiffReport(PlanningReport): def get_cell(self, run): # str() guards against missing attribute values (None) in the join. return ";".join(str(run.get(attr)) for attr in self.attributes) def get_text(self): lines = [] for runs in self.problem_runs.values(): hashes = set([r.get("translator_output_sas_hash") for r in runs]) if len(hashes) > 1 or None in hashes: lines.append(";".join([self.get_cell(r) for r in runs])) return "\n".join(lines) class SameValueFilters(object): """Ignore runs for a task where all algorithms have the same value.""" def __init__(self, attribute): self._attribute = attribute self._tasks_to_values = defaultdict(list) def _get_task(self, run): return (run['domain'], run['problem']) def store_values(self, run): value = run.get(self._attribute) self._tasks_to_values[self._get_task(run)].append(value) # Don't filter this run, yet. return True def filter_tasks_with_equal_values(self, run): values = self._tasks_to_values[self._get_task(run)] return len(set(values)) != 1 exp.add_fetcher(src='data/issue939-base-eval') exp.add_fetcher(src='data/issue939-v1-eval', merge=True) ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"] #exp.add_comparison_table_step(attributes=ATTRIBUTES) same_value_filters = SameValueFilters("translator_output_sas_hash") # exp.add_comparison_table_step( # name="filtered", # attributes=ATTRIBUTES, # filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) exp.add_report(TranslatorDiffReport( attributes=["domain", "problem", "algorithm", "run_dir"] ), outfile="different_output_sas.csv" ) exp.add_report(AbsoluteReport(attributes=ATTRIBUTES)) exp.add_report(ComparativeReport([ ('issue939-base-translate-only', 'issue939-v1-translate-only') ], attributes=ATTRIBUTES)) exp.run_steps()
2,598
Python
29.57647
98
0.687452
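SameValueFilters is designed to be passed as two filters in sequence: store_values first records every run's value, and filter_tasks_with_equal_values then drops tasks on which all algorithms agree. A sketch with hand-made run dictionaries (assuming the SameValueFilters class from fetch.py above):

runs = [
    {"domain": "gripper", "problem": "p01", "translator_output_sas_hash": "a"},
    {"domain": "gripper", "problem": "p01", "translator_output_sas_hash": "b"},
    {"domain": "blocks", "problem": "p01", "translator_output_sas_hash": "c"},
    {"domain": "blocks", "problem": "p01", "translator_output_sas_hash": "c"},
]
f = SameValueFilters("translator_output_sas_hash")
for run in runs:
    f.store_values(run)
kept = [run for run in runs if f.filter_tasks_with_equal_values(run)]
# Only the gripper runs survive: their hashes differ between algorithms.
print([(r["domain"], r["translator_output_sas_hash"]) for r in kept])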
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/translator_additional_parser.py
#!/usr/bin/env python import hashlib from lab.parser import Parser def add_hash_value(content, props): props['translator_output_sas_hash'] = hashlib.sha512(str(content).encode('utf-8')).hexdigest() parser = Parser() parser.add_function(add_hash_value, file="output.sas") parser.parse()
294
Python
21.692306
98
0.731293
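The parser above reduces each output.sas file to a single SHA-512 digest so translator outputs can be compared across revisions without storing the files themselves. The hashing step in isolation (file content is a stand-in value):

import hashlib

content = "begin_version 3 end_version ..."  # stand-in for an output.sas file
digest = hashlib.sha512(content.encode("utf-8")).hexdigest()
print(digest[:16])  # two runs translated identically when their digests match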
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/v1.py
#! /usr/bin/env python3 # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue939-v1"] CONFIGS = [ IssueConfig( "translate-only", [], driver_options=["--translate"]) ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]") # This was generated by running "./suites.py all" in the benchmarks # repository. SUITE = [ 'agricola-opt18-strips', 'agricola-sat18-strips', 'airport', 'airport-adl', 'assembly', 'barman-mco14-strips', 'barman-opt11-strips', 'barman-opt14-strips', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-opt18-adl', 'caldera-sat18-adl', 'caldera-split-opt18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-opt14-strips', 'childsnack-sat14-strips', 'citycar-opt14-adl', 'citycar-sat14-adl', 'data-network-opt18-strips', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-opt11-strips', 'floortile-opt14-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-opt14-strips', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-agl14-strips', 'hiking-opt14-strips', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-opt14-adl', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'no-mprime', 'no-mystery', 'nomystery-opt11-strips', 'nomystery-sat11-strips', 'nurikabe-opt18-adl', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-agl14-strips', 'openstacks-opt08-adl', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-opt18-strips', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-opt18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parcprinter-sat11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pegsol-sat11-strips', 'petri-net-alignment-opt18-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-opt18-adl', 'settlers-sat18-adl', 'snake-opt18-strips', 'snake-sat18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-opt18-strips', 'spider-sat18-strips', 'storage', 'termes-opt18-strips', 'termes-sat18-strips', 'tetris-opt14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'thoughtful-sat14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-opt08-strips', 
'woodworking-opt11-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel', ] if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser("translator_additional_parser.py") del exp.commands['remove-output-sas'] exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_parse_again_step() exp.add_fetcher(name='fetch') exp.run_steps()
4,841
Python
24.350785
68
0.655443
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue939/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in range(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/v2-v4-compare.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') REVISIONS = [] CONFIGS = [] SUITE = DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email='[email protected]') if is_test_run(): SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, ms_construction_time, ms_atomic_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_fetcher('data/issue668-v2-eval') exp.add_fetcher('data/issue668-v4-eval') exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ ('%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v4'), ]),outfile='issue668-v2-v4-compare-abp.html') exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ ('%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v4'), 
('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v4'), ]),outfile='issue668-v2-v4-compare-pba.html') exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4'), ('%s-sbf-miasm-allrnd-b50k' % 'issue668-v2','%s-sbf-miasm-allrnd-b50k' % 'issue668-v4'), ]),outfile='issue668-v2-v4-compare-paper.html') exp.run_steps()
5,070
Python
49.20792
145
0.686982
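The nine hand-written pairs in each ComparativeReport above follow a strict naming scheme, so they can equally be generated, which avoids copy-paste slips; a sketch (not repository code) for the "-abp" variant:

nicks = ["sbf-miasm-%s-%s-abp-b50k" % (ts_order, prod_order)
         for ts_order in ("rl", "l", "rnd")
         for prod_order in ("otn", "nto", "rnd")]
algorithm_pairs = [("issue668-v2-%s" % nick, "issue668-v4-%s" % nick)
                   for nick in nicks]
for pair in algorithm_pairs:
    print(pair)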
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/common_setup.py
# -*- coding: utf-8 -*- from __future__ import print_function import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport try: from relativescatter import RelativeScatterPlotReport matplotlib = True except ImportError: print('matplotlib not available, scatter plots not available') matplotlib = False def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main
script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", "unsolvable", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if matplotlib: if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print("Make scatter plot for", name) algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,457
Python
36.071795
82
0.610638
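Unlike the issue939 copy, this common_setup guards the relativescatter import so the module still loads on machines without matplotlib. The underlying optional-dependency pattern, as a standalone sketch:

try:
    from relativescatter import RelativeScatterPlotReport  # optional
    matplotlib = True
except ImportError:
    print("matplotlib not available, scatter plots not available")
    matplotlib = False
# Callers then branch on the flag, as add_scatter_plot_step does above.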
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/v5-compare.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') REVISIONS = [] CONFIGS = [] SUITE = DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH"]) if is_test_run(): SUITE = ['depot:p01.pddl', 'parcprinter-opt11-strips:p01.pddl', 'mystery:prob07.pddl'] ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, ms_construction_time, ms_atomic_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_fetcher('data/issue668-v5-hack-eval') exp.add_fetcher('data/issue668-v5-clean-eval') exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ ('%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v5-clean'), ]),outfile='issue668-v5-hack-vs-clean-abp.html') exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ ('%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-rnd-pba-b50k' % 
'issue668-v5-clean'), ('%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v5-clean'), ]),outfile='issue668-v5-hack-vs-clean-pba.html') exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-clean'), ('%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-hack','%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-clean'), ]),outfile='issue668-v5-hack-vs-clean-paper.html') exp.run_steps()
5,328
Python
52.289999
129
0.694632
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/suites.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import argparse import textwrap HELP = "Convert suite name to list of domains or tasks." def suite_alternative_formulations(): return ['airport-adl', 'no-mprime', 'no-mystery'] def suite_ipc98_to_ipc04_adl(): return [ 'assembly', 'miconic-fulladl', 'miconic-simpleadl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'schedule', ] def suite_ipc98_to_ipc04_strips(): return [ 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', 'satellite', 'zenotravel', ] def suite_ipc98_to_ipc04(): # All IPC1-4 domains, including the trivial Movie. return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) def suite_ipc06_adl(): return [ 'openstacks', 'pathways', 'trucks', ] def suite_ipc06_strips_compilations(): return [ 'openstacks-strips', 'pathways-noneg', 'trucks-strips', ] def suite_ipc06_strips(): return [ 'pipesworld-tankage', 'rovers', 'storage', 'tpp', ] def suite_ipc06(): return sorted(suite_ipc06_adl() + suite_ipc06_strips()) def suite_ipc08_common_strips(): return [ 'parcprinter-08-strips', 'pegsol-08-strips', 'scanalyzer-08-strips', ] def suite_ipc08_opt_adl(): return ['openstacks-opt08-adl'] def suite_ipc08_opt_strips(): return sorted(suite_ipc08_common_strips() + [ 'elevators-opt08-strips', 'openstacks-opt08-strips', 'sokoban-opt08-strips', 'transport-opt08-strips', 'woodworking-opt08-strips', ]) def suite_ipc08_opt(): return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) def suite_ipc08_sat_adl(): return ['openstacks-sat08-adl'] def suite_ipc08_sat_strips(): return sorted(suite_ipc08_common_strips() + [ # Note: cyber-security is missing. 
'elevators-sat08-strips', 'openstacks-sat08-strips', 'sokoban-sat08-strips', 'transport-sat08-strips', 'woodworking-sat08-strips', ]) def suite_ipc08_sat(): return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) def suite_ipc08(): return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) def suite_ipc11_opt(): return [ 'barman-opt11-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'nomystery-opt11-strips', 'openstacks-opt11-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'pegsol-opt11-strips', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'tidybot-opt11-strips', 'transport-opt11-strips', 'visitall-opt11-strips', 'woodworking-opt11-strips', ] def suite_ipc11_sat(): return [ 'barman-sat11-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'nomystery-sat11-strips', 'openstacks-sat11-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'pegsol-sat11-strips', 'scanalyzer-sat11-strips', 'sokoban-sat11-strips', 'tidybot-sat11-strips', 'transport-sat11-strips', 'visitall-sat11-strips', 'woodworking-sat11-strips', ] def suite_ipc11(): return sorted(suite_ipc11_opt() + suite_ipc11_sat()) def suite_ipc14_agl_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_agl_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-agl14-strips', 'openstacks-agl14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_agl(): return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) def suite_ipc14_mco_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_mco_strips(): return [ 'barman-mco14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_mco(): return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) def suite_ipc14_opt_adl(): return [ 'cavediving-14-adl', 'citycar-opt14-adl', 'maintenance-opt14-adl', ] def suite_ipc14_opt_strips(): return [ 'barman-opt14-strips', 'childsnack-opt14-strips', 'floortile-opt14-strips', 'ged-opt14-strips', 'hiking-opt14-strips', 'openstacks-opt14-strips', 'parking-opt14-strips', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips', 'visitall-opt14-strips', ] def suite_ipc14_opt(): return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) def suite_ipc14_sat_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_sat_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_sat(): return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) def suite_ipc14(): return sorted(set( suite_ipc14_agl() + suite_ipc14_mco() + suite_ipc14_opt() + suite_ipc14_sat())) def suite_unsolvable(): return sorted( ['mystery:prob%02d.pddl' % index for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) def suite_optimal_adl(): return sorted( suite_ipc98_to_ipc04_adl() + 
suite_ipc06_adl() + suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) def suite_optimal_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + suite_ipc11_opt() + suite_ipc14_opt_strips()) def suite_optimal(): return sorted(suite_optimal_adl() + suite_optimal_strips()) def suite_satisficing_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) def suite_satisficing_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + suite_ipc11_sat() + suite_ipc14_sat_strips()) def suite_satisficing(): return sorted(suite_satisficing_adl() + suite_satisficing_strips()) def suite_all(): return sorted( suite_ipc98_to_ipc04() + suite_ipc06() + suite_ipc06_strips_compilations() + suite_ipc08() + suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("suite", help="suite name") return parser.parse_args() def main(): prefix = "suite_" suite_names = [ name[len(prefix):] for name in sorted(globals().keys()) if name.startswith(prefix)] parser = argparse.ArgumentParser(description=HELP) parser.add_argument("suite", choices=suite_names, help="suite name") parser.add_argument( "--width", default=72, type=int, help="output line width (default: %(default)s). Use 1 for single " "column.") args = parser.parse_args() suite_func = globals()[prefix + args.suite] print(textwrap.fill( str(suite_func()), width=args.width, break_long_words=False, break_on_hyphens=False)) if __name__ == "__main__": main()
8,551
Python
23.364672
77
0.595954
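suites.py resolves the suite name given on the command line by prefix lookup in globals(): every function named suite_<name> automatically becomes a valid choice. The idiom in miniature (the demo suite functions are hypothetical):

PREFIX = "suite_"

def suite_demo_small():
    return ["gripper"]

def suite_demo_large():
    return ["blocks", "gripper"]

names = [name[len(PREFIX):] for name in sorted(globals())
         if name.startswith(PREFIX)]
print(names)  # ['demo_large', 'demo_small']
print(globals()[PREFIX + "demo_large"]())  # ['blocks', 'gripper']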
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/v5-paper.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.absolute import AbsoluteReport from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') REVISIONS = ["issue668-v5-hack"] CONFIGS = [ IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), ] SUITE = DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH"], partition='infai_1') if is_test_run(): SUITE = ['depot:p01.pddl', 'parcprinter-opt11-strips:p01.pddl', 'mystery:prob07.pddl'] ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) exp.add_parser('ms_parser', 'ms-parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, ms_construction_time, ms_atomic_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack', '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack', '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack', '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack', '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack', '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack', '%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-hack', ]),outfile='issue668-v5-paper.html') exp.run_steps()
6,978
Python
74.04301
587
0.766982
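The six sbf-miasm-*-nto-* configurations above differ only in atomic_ts_order and atomic_before_product. A sketch (not in the original script) of enumerating their nicks and parameter values in a loop, with the long search-string assembly elided:

# Sketch only: the nick/parameter combinations used above.
ORDERS = [("rl", "reverse_level"), ("l", "level"), ("rnd", "random")]
ABP = [("abp", "true"), ("pba", "false")]
for order_nick, atomic_ts_order in ORDERS:
    for abp_nick, atomic_before_product in ABP:
        nick = "sbf-miasm-%s-nto-%s-b50k" % (order_nick, abp_nick)
        # ...substitute atomic_ts_order/atomic_before_product into the
        # total_order(...) part of the search string shown above...
        print(nick, atomic_ts_order, atomic_before_product)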
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/ms-parser.py
#! /usr/bin/env python from lab.parser import Parser parser = Parser() parser.add_pattern('ms_final_size', r'Final transition system size: (\d+)', required=False, type=int) parser.add_pattern('ms_construction_time', r'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) parser.add_pattern('ms_atomic_construction_time', r't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) parser.add_pattern('ms_memory_delta', r'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) def check_ms_constructed(content, props): ms_construction_time = props.get('ms_construction_time') abstraction_constructed = False if ms_construction_time is not None: abstraction_constructed = True props['ms_abstraction_constructed'] = abstraction_constructed parser.add_function(check_ms_constructed) def check_planner_exit_reason(content, props): ms_abstraction_constructed = props.get('ms_abstraction_constructed') error = props.get('error') if error != 'none' and error != 'timeout' and error != 'out-of-memory': print('error: %s' % error) return # Check whether merge-and-shrink computation or search ran out of # time or memory. ms_out_of_time = False ms_out_of_memory = False search_out_of_time = False search_out_of_memory = False if ms_abstraction_constructed is False: if error == 'timeout': ms_out_of_time = True elif error == 'out-of-memory': ms_out_of_memory = True elif ms_abstraction_constructed is True: if error == 'timeout': search_out_of_time = True elif error == 'out-of-memory': search_out_of_memory = True props['ms_out_of_time'] = ms_out_of_time props['ms_out_of_memory'] = ms_out_of_memory props['search_out_of_time'] = search_out_of_time props['search_out_of_memory'] = search_out_of_memory parser.add_function(check_planner_exit_reason) def check_perfect_heuristic(content, props): plan_length = props.get('plan_length') expansions = props.get('expansions') if plan_length is not None: perfect_heuristic = False if plan_length + 1 == expansions: perfect_heuristic = True props['perfect_heuristic'] = perfect_heuristic parser.add_function(check_perfect_heuristic) def check_proved_unsolvability(content, props): proved_unsolvability = False if props['coverage'] == 0: for line in content.splitlines(): if line == 'Completely explored state space -- no solution!': proved_unsolvability = True break props['proved_unsolvability'] = proved_unsolvability parser.add_function(check_proved_unsolvability) parser.parse()
2,816
Python
37.589041
139
0.669389
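A self-contained sketch of what one of the patterns registered above matches. The log line is made up; the regular expression is the one passed to parser.add_pattern() for ms_construction_time:

import re

# Hypothetical planner log line.
line = "Done initializing merge-and-shrink heuristic [12.34s]"
match = re.search(r'Done initializing merge-and-shrink heuristic \[(.+)s\]', line)
if match:
    print(float(match.group(1)))  # 12.34, stored as 'ms_construction_time'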
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue668/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in range(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter(axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute differs between two configurations. The attribute value in config 1 is shown on the x-axis and the ratio of the config 2 value to the config 1 value on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("output format 'tex' is not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
Python
35.654205
84
0.597042
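The core transform in _fill_categories() above, restated as a standalone sketch with made-up values: each task contributes the point (val1, val2/val1), and the y-limits are then widened so the plot is centered around y = 1.

pairs = [(10.0, 5.0), (2.0, 8.0)]             # hypothetical (val1, val2) per task
points = [(v1, v2 / v1) for v1, v2 in pairs]  # [(10.0, 0.5), (2.0, 4.0)]

ylim_bottom = min(y for _, y in points)
ylim_top = max(y for _, y in points)
# Center around 1, mirroring the report code above.
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / ylim_bottom)
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / ylim_top)
print(points, ylim_bottom, ylim_top)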
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue988/v1-satisficing.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue988-base", "issue988-v1"] CONFIGS = [ IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_comparison_table_step() exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) exp.run_steps()
1,366
Python
24.314814
68
0.724012
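A sketch of the test-run gating used above (logic as in common_setup.is_test_run(), with the cluster check stubbed out as a parameter):

def is_test_run(test_run_arg, on_cluster):
    # "--test=yes" forces a local test run; "auto" tests only off-cluster.
    return test_run_arg == "yes" or (test_run_arg == "auto" and not on_cluster)

assert is_test_run("auto", on_cluster=False)     # local machine -> small suite
assert not is_test_run("auto", on_cluster=True)  # cluster -> full suite
assert not is_test_run("no", on_cluster=False)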
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue988/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 
'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".git" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".git")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions or []: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the ComparativeReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used.
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) if config_nick2 is not None: name += "-" + config_nick2 print("Make scatter plot for", name) algo1 = get_algo_nick(rev1, config_nick) algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) report = ScatterPlotReport( filter_algorithm=[algo1, algo2], attributes=[attribute], relative=relative, get_category=lambda run1, run2: run1["domain"]) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: print(config) for rev1, rev2 in itertools.combinations(self._revisions, 2): print(rev1, rev2) for attribute in self.get_supported_attributes( config.nick, attributes): print(attribute) make_scatter_plot(config.nick, rev1, rev2, attribute) for nick1, nick2, rev1, rev2, attribute in additional: make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) self.add_step(step_name, make_scatter_plots)
15,080
Python
36.796992
94
0.616048
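A sketch (hypothetical values) of the revision × config expansion performed in IssueExperiment.__init__() above, using the same naming scheme as get_algo_nick():

revisions = ["issue988-base", "issue988-v1"]
config_nicks = ["lama-first"]
algos = ["%s-%s" % (rev, nick) for rev in revisions for nick in config_nicks]
print(algos)  # ['issue988-base-lama-first', 'issue988-v1-lama-first']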
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue988/v1-optimal.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue988-base", "issue988-v1"] CONFIGS = [ IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_comparison_table_step() exp.run_steps()
1,262
Python
23.288461
68
0.7187
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue123/issue123.py
#! /usr/bin/env python from standard_experiment import REMOTE, get_exp from downward import suites #from lab.reports import Attribute, avg import os.path # Set the following variables for the experiment REPO_NAME = 'fd-issue123' # revisions, e.g. ['3d6c1ccacdce'] REVISIONS = ['issue123-base'] # suites, e.g. ['gripper:prob01.pddl', 'zenotravel:pfile1'] or suites.suite_satisficing_with_ipc11() LOCAL_SUITE = ['depot:pfile1'] GRID_SUITE = suites.suite_satisficing_with_ipc11() # configs, e.g. '--search', 'astar(lmcut())' for config CONFIGS = { 'lama-2011': [ "--if-unit-cost", "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", "--search", "iterated([" " lazy_greedy([hff,hlm],preferred=[hff,hlm])," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)" " ],repeat_last=true,continue_on_fail=true)", "--if-non-unit-cost", "--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true," " lm_cost_type=one,cost_type=one))", "--heuristic", "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true," " lm_cost_type=plusone,cost_type=plusone))", "--search", "iterated([" " lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1]," " cost_type=one,reopen_closed=false)," " lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2]," " reopen_closed=false)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)" " ],repeat_last=true,continue_on_fail=true)", ], 'lama-2011-first-it': [ "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true," " lm_cost_type=one,cost_type=one))", "--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one)" ], 'lama-2011-separated': [ "--if-unit-cost", "--heuristic", "hlm=lmcount(lm_rhw(reasonable_orders=true),pref=true)", "--heuristic", "hff=ff()", "--search", "iterated([" " lazy_greedy([hff,hlm],preferred=[hff,hlm])," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2)," " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)" " ],repeat_last=true,continue_on_fail=true)", "--if-non-unit-cost", "--heuristic", "hlm1=lmcount(lm_rhw(reasonable_orders=true," " lm_cost_type=one,cost_type=one)," " pref=true,cost_type=one)", "--heuristic", "hff1=ff(cost_type=one)", "--heuristic", "hlm2=lmcount(lm_rhw(reasonable_orders=true," " lm_cost_type=plusone,cost_type=plusone)," " pref=true,cost_type=plusone)", "--heuristic", "hff2=ff(cost_type=plusone)", "--search", "iterated([" " lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1]," " cost_type=one,reopen_closed=false)," " lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2]," " reopen_closed=false)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2)," " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)" " ],repeat_last=true,continue_on_fail=true)", ], 'lama-2011-first-it-separated': [ "--heuristic", "hlm=lmcount(lm_rhw(reasonable_orders=true," " lm_cost_type=one,cost_type=one)," " pref=true,cost_type=one)", "--heuristic", "hff=ff(cost_type=one)", "--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one)", ], } # limits, e.g. { 'search_time': 120 } LIMITS = None # for 'make debug', set to True. 
COMPILATION_OPTION = None #(default: 'release') # choose any lower priority if wished PRIORITY = None #(default: 0) # Do not change anything below here SCRIPT_PATH = os.path.abspath(__file__) if REMOTE: SUITE = GRID_SUITE REPO = os.path.expanduser('~/repos/' + REPO_NAME) else: SUITE = LOCAL_SUITE REPO = os.path.expanduser('~/work/' + REPO_NAME) # Create the experiment. Add parsers, fetchers or reports... exp = get_exp(script_path=SCRIPT_PATH, repo=REPO, suite=SUITE, configs=CONFIGS, revisions=REVISIONS, limits=LIMITS, compilation_option=COMPILATION_OPTION, priority=PRIORITY) exp.add_score_attributes() exp.add_extra_attributes(['quality']) REV = REVISIONS[0] configs_lama = [('%s-lama-2011' % REV, '%s-lama-2011-separated' % REV)] exp.add_configs_report(compared_configs=configs_lama, name='lama') configs_lama_first_it = [('%s-lama-2011-first-it' % REV, '%s-lama-2011-first-it-separated' % REV)] exp.add_configs_report(compared_configs=configs_lama_first_it, name='lama-first-it') exp.add_absolute_report() exp()
5,928
Python
41.049645
100
0.529184
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue392/lama-nonunit.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue392-v2"] LIMITS = {"search_time": 300} CONFIGS = {} for randomize in ["false", "true"]: for pref_first in ["false", "true"]: CONFIGS["lama-nonunit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()] = [ "--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=ONE,cost_type=ONE))", "--heuristic", "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))", "--search", "iterated([" "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,cost_type=ONE,reopen_closed=false)," "lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,reopen_closed=false)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2)," "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)]," "repeat_last=true,continue_on_fail=true)" % locals() ] SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) & set(suites.suite_diverse_costs())) exp = common_setup.IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_absolute_report_step() exp()
1,902
Python
43.255813
176
0.650368
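The "%(name)s" templating idiom used above, isolated as a sketch: placeholders are filled from locals(), so each loop iteration bakes the current flag values into both the config nick and the search string.

randomize, pref_first = "true", "false"  # illustrative values
nick = "lama-nonunit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()
print(nick)  # lama-nonunit-randomize-true-pref_first-false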
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue392/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. 
It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print("Make scatter plot for %s" % name) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,677
Python
35.431034
79
0.607715
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue392/lama-unit.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue392-v2"] LIMITS = {"search_time": 300} CONFIGS = {} for randomize in ["false", "true"]: for pref_first in ["false", "true"]: CONFIGS["lama-unit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()] = [ "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))", "--search", "iterated([" "lazy_greedy([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s)," "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5)," "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3)," "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2)," "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)]," "repeat_last=true,continue_on_fail=true)" % locals() ] SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) & set(suites.suite_unit_costs())) exp = common_setup.IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_absolute_report_step() exp()
1,532
Python
35.499999
139
0.648172
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1000/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 
'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".git" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".git")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions or []: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the ComparativeReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used.
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) if config_nick2 is not None: name += "-" + config_nick2 print("Make scatter plot for", name) algo1 = get_algo_nick(rev1, config_nick) algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) report = ScatterPlotReport( filter_algorithm=[algo1, algo2], attributes=[attribute], relative=relative, get_category=lambda run1, run2: run1["domain"]) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: print(config) for rev1, rev2 in itertools.combinations(self._revisions, 2): print(rev1, rev2) for attribute in self.get_supported_attributes( config.nick, attributes): print(attribute) make_scatter_plot(config.nick, rev1, rev2, attribute) for nick1, nick2, rev1, rev2, attribute in additional: make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) self.add_step(step_name, make_scatter_plots)
15,080
Python
36.796992
94
0.616048
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1000/v14-optimal.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue1000-base", "issue1000-v14"] CONFIGS = [ IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_comparison_table_step() exp.run_steps()
1,266
Python
22.90566
68
0.718799
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1000/v14-satisficing.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue1000-base", "issue1000-v14"] CONFIGS = [ IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_comparison_table_step() exp.run_steps()
1,306
Python
23.203703
68
0.722818
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1000/satisficing.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment def make_comparison_table(): report = common_setup.ComparativeReport( algorithm_pairs=[ ("issue1000-base-lama-first", "issue1000-v11-lama-first"), ("issue1000-base-lama-first", "issue1000-v12-lama-first"), ("issue1000-base-lama-first", "issue1000-v13-lama-first"), ], attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, ) outfile = os.path.join( exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) ) report(exp.eval_dir, outfile) exp.add_report(report) DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue1000-base", "issue1000-v11", "issue1000-v12", "issue1000-v13"] CONFIGS = [ IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_step("comparison table", make_comparison_table) exp.run_steps()
1,931
Python
26.211267
72
0.691352
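The hard-coded algorithm_pairs above always compare the base revision against each later revision rather than all pairs. The same list can be derived from the REVISIONS list, as in this sketch (using the single config nick "lama-first" from the script):

revisions = ["issue1000-base", "issue1000-v11", "issue1000-v12", "issue1000-v13"]
base, others = revisions[0], revisions[1:]
# Pair the base algorithm with each later revision's algorithm.
algorithm_pairs = [
    ("%s-lama-first" % base, "%s-lama-first" % rev) for rev in others]
print(algorithm_pairs)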
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1000/optimal.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment def make_comparison_table(): report = common_setup.ComparativeReport( algorithm_pairs=[ ("issue1000-base-seq-opt-bjolp", "issue1000-v11-seq-opt-bjolp"), ("issue1000-base-seq-opt-bjolp", "issue1000-v12-seq-opt-bjolp"), ("issue1000-base-seq-opt-bjolp", "issue1000-v13-seq-opt-bjolp"), ], attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, ) outfile = os.path.join( exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) ) report(exp.eval_dir, outfile) exp.add_report(report) DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue1000-base", "issue1000-v11", "issue1000-v12", "issue1000-v13"] CONFIGS = [ IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"], ) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=2) exp = common_setup.IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_step("comparison table", make_comparison_table) exp.run_steps()
1,909
Python
26.285714
76
0.687795
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,171
Python
35.715026
79
0.613859
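The docstring of get_experiment_name() above gives the mapping "/ham/spam/eggs.py" => "spam-eggs"; a self-contained check of that rule (the helper name is hypothetical):

import os

def experiment_name_from(script_path):
    # Mirrors get_experiment_name(): parent directory plus script basename.
    script = os.path.abspath(script_path)
    script_dir = os.path.basename(os.path.dirname(script))
    script_base = os.path.splitext(os.path.basename(script))[0]
    return "%s-%s" % (script_dir, script_base)

assert experiment_name_from("/ham/spam/eggs.py") == "spam-eggs"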
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/custom-parser.py
#! /usr/bin/env python from lab.parser import Parser def add_dominance_pruning_failed(content, props): if "dominance_pruning=False" in content: failed = False elif "pdb_collection_construction_time" not in props: failed = False else: failed = "dominance_pruning_time" not in props props["dominance_pruning_failed"] = int(failed) def main(): print "Running custom parser" parser = Parser() parser.add_pattern( "pdb_collection_construction_time", "^PDB collection construction time: (.+)s$", type=float, flags="M", required=False) parser.add_pattern( "dominance_pruning_time", "^Dominance pruning took (.+)s$", type=float, flags="M", required=False) parser.add_pattern( "dominance_pruning_pruned_subsets", r"Pruned (\d+) of \d+ maximal additive subsets", type=int, required=False) parser.add_pattern( "dominance_pruning_pruned_pdbs", r"Pruned (\d+) of \d+ PDBs", type=int, required=False) parser.add_function(add_dominance_pruning_failed) parser.parse() main()
1,069
Python
31.424241
127
0.667914
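add_dominance_pruning_failed in the custom parser above marks a run as failed in exactly one case: PDB construction produced a time but dominance pruning did not. A small check of that logic against a hypothetical props dictionary:

def add_dominance_pruning_failed(content, props):
    # Copied from custom-parser.py above.
    if "dominance_pruning=False" in content:
        failed = False
    elif "pdb_collection_construction_time" not in props:
        failed = False
    else:
        failed = "dominance_pruning_time" not in props
    props["dominance_pruning_failed"] = int(failed)

props = {"pdb_collection_construction_time": 1.5}  # pruning time missing
add_dominance_pruning_failed("search with dominance_pruning=True", props)
assert props["dominance_pruning_failed"] == 1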
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/v3-no-pruning.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue735-v3"] BUILD_OPTIONS = ["release32nolp"] DRIVER_OPTIONS = ["--build", "release32nolp"] CONFIGS = [ IssueConfig( "cpdbs-{nick}-pruning-{pruning}".format(**locals()), ["--search", "astar(cpdbs({generator}, dominance_pruning={pruning}))".format(**locals())], build_options=BUILD_OPTIONS, driver_options=DRIVER_OPTIONS) for nick, generator in [("sys2", "systematic(2)"), ("hc", "hillclimbing(max_time=900)")] for pruning in [False, True] ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) IssueExperiment.DEFAULT_TABLE_ATTRIBUTES += [ "dominance_pruning_failed", "dominance_pruning_time", "dominance_pruning_pruned_subsets", "dominance_pruning_pruned_pdbs", "pdb_collection_construction_time"] exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_resource("custom_parser", "custom-parser.py") exp.add_command("run-custom-parser", ["{custom_parser}"]) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.run_steps()
1,703
Python
30.555555
98
0.714621
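The CONFIGS comprehension in v3-no-pruning.py crosses two PDB generators with both pruning settings, yielding four algorithms per revision. Reproducing just the nick expansion stand-alone:

# Same cross product as the CONFIGS comprehension above, nicks only.
nicks = [
    "cpdbs-{nick}-pruning-{pruning}".format(nick=nick, pruning=pruning)
    for nick, generator in [("sys2", "systematic(2)"),
                            ("hc", "hillclimbing(max_time=900)")]
    for pruning in [False, True]]
print(nicks)
# ['cpdbs-sys2-pruning-False', 'cpdbs-sys2-pruning-True',
#  'cpdbs-hc-pruning-False', 'cpdbs-hc-pruning-True']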
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue735-base", "issue735-v1"] BUILD_OPTIONS = ["release32nolp"] DRIVER_OPTIONS = ["--build", "release32nolp", "--search-time-limit", "30m"] CONFIGS = [ IssueConfig( "cpdbs-sys2", ["--search", "astar(cpdbs(systematic(2)))"], build_options=BUILD_OPTIONS, driver_options=DRIVER_OPTIONS), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) IssueExperiment.DEFAULT_TABLE_ATTRIBUTES += [ "dominance_pruning_failed", "dominance_pruning_time", "dominance_pruning_pruned_subsets", "dominance_pruning_pruned_pdbs", "pdb_collection_construction_time"] exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_resource("custom_parser", "custom-parser.py") exp.add_command("run-custom-parser", ["{custom_parser}"]) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_fetcher(name="parse-again", parsers=["custom-parser.py"]) exp.add_absolute_report_step() exp.add_comparison_table_step() for attribute in ["total_time"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) ) exp.run_steps()
2,047
Python
31
93
0.681485
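The scatter-plot outfile pattern at the end of v1.py interpolates the experiment name, attribute, config nick and both revisions. Expanded with plausible values (the experiment name is an assumption; at runtime it is derived from the script path):

exp_name = "issue735-v1"  # hypothetical
attribute, config_nick = "total_time", "cpdbs-sys2"
revisions = ["issue735-base", "issue735-v1"]
outfile = "{}-{}-{}-{}-{}.png".format(
    exp_name, attribute, config_nick, *revisions)
print(outfile)  # issue735-v1-total_time-cpdbs-sys2-issue735-base-issue735-v1.png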
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
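The core of RelativeScatterPlotReport._fill_categories above is the transformation x = val1, y = val2 / val1, followed by widening the y-limits so the plot is symmetric around 1. A numeric toy example of that computation (the values are invented):

pairs = [(10.0, 5.0), (10.0, 20.0), (3.0, 3.0)]  # (val1, val2) per task

ylim_bottom, ylim_top = 2, 0.5
points = []
for val1, val2 in pairs:
    y = val2 / float(val1)
    points.append((val1, y))
    ylim_top = max(ylim_top, y)
    ylim_bottom = min(ylim_bottom, y)
# Center around 1, as in _fill_categories above.
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))
print(points)                   # [(10.0, 0.5), (10.0, 2.0), (3.0, 1.0)]
print((ylim_bottom, ylim_top))  # (0.5, 2.0)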
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue735/v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue735-base", "issue735-v1", "issue735-v2", "issue735-v3"] BUILD_OPTIONS = ["release32nolp"] DRIVER_OPTIONS = ["--build", "release32nolp"] CONFIGS = [ IssueConfig( "cpdbs-sys2", ["--search", "astar(cpdbs(systematic(2)))"], build_options=BUILD_OPTIONS, driver_options=DRIVER_OPTIONS), IssueConfig( "cpdbs-hc", ["--search", "astar(cpdbs(hillclimbing(max_time=900)))"], build_options=BUILD_OPTIONS, driver_options=DRIVER_OPTIONS), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) IssueExperiment.DEFAULT_TABLE_ATTRIBUTES += [ "dominance_pruning_failed", "dominance_pruning_time", "dominance_pruning_pruned_subsets", "dominance_pruning_pruned_pdbs", "pdb_collection_construction_time"] exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_algorithm( "issue735-v3:cpdbs-sys2-debug", common_setup.get_repo_base(), "issue735-v3", ["--search", "astar(cpdbs(systematic(2)))"], build_options=["debug32nolp"], driver_options=["--build", "debug32nolp"]) exp.add_resource("custom_parser", "custom-parser.py") exp.add_command("run-custom-parser", ["{custom_parser}"]) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() for attribute in ["total_time"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) ) exp.run_steps()
2,400
Python
31.013333
93
0.66875
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue468/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return (node.endswith("cluster") or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Once we have reference results, we should add "quality". # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. 
It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,734
Python
35.594827
79
0.608685
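With search_revisions in the issue468 common_setup.py above, every combination shares the translator and preprocessor of the first revision and varies only the planner; the base revision's nick is overridden so each combination is still labelled by its search revision. A sketch of the resulting pairings, using placeholder strings instead of the real Translator/Preprocessor/Planner checkout objects:

search_revisions = ["issue468-base", "issue468-v1"]
base_rev = search_revisions[0]
combinations = [
    ("Translator(%s, nick=%s)" % (base_rev, rev),
     "Preprocessor(%s, nick=%s)" % (base_rev, rev),
     "Planner(%s, nick=%s)" % (rev, rev))
    for rev in search_revisions]
for combo in combinations:
    print(combo)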
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue468/issue468.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup CONFIGS = { 'astar_lmcount_lm_merged_rhw_hm': [ '--search', 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)'], } exp = common_setup.IssueExperiment( search_revisions=["issue468-base", "issue468-v1"], configs=CONFIGS, suite=suites.suite_optimal_with_ipc11(), ) exp.add_comparison_table_step() exp()
462
Python
19.130434
85
0.640693
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue425/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return (node.endswith("cluster.bc2.ch") or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Once we have reference results, we should add "quality". # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. 
:: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. 
kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,741
Python
35.614942
79
0.608743
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue425/opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites, configs from downward.reports.compare import CompareConfigsReport import common_setup REVISIONS = ["issue425-base", "issue425-v1"] CONFIGS = configs.default_configs_optimal() # remove config that is disabled in this branch del CONFIGS['astar_selmax_lmcut_lmcount'] exp = common_setup.IssueExperiment( search_revisions=REVISIONS, configs=CONFIGS, suite=suites.suite_optimal_with_ipc11(), limits={"search_time": 300} ) exp.add_absolute_report_step() exp.add_comparison_table_step() def grouped_configs_to_compare(config_nicks): grouped_configs = [] for config_nick in config_nicks: col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS] grouped_configs.append((col_names[0], col_names[1], 'Diff - %s' % config_nick)) return grouped_configs exp.add_report(CompareConfigsReport( compared_configs=grouped_configs_to_compare(configs.configs_optimal_core()), attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, ), outfile="issue425-opt-compare-core-configs.html" ) def add_first_run_search_time(run): if run.get("search_time_all", []): run["first_run_search_time"] = run["search_time_all"][0] return run exp.add_report(CompareConfigsReport( compared_configs=grouped_configs_to_compare(configs.configs_optimal_ipc()), attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"], filter=add_first_run_search_time, ), outfile="issue425-opt-compare-portfolio-configs.html" ) exp()
1,764
Python
32.301886
113
0.650794
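add_first_run_search_time in opt.py (and identically in sat.py below) exposes the first episode of the anytime search as its own attribute so the portfolio comparison tables can report it. Demonstrated on a hypothetical run record:

def add_first_run_search_time(run):
    # Copied from opt.py above.
    if run.get("search_time_all", []):
        run["first_run_search_time"] = run["search_time_all"][0]
    return run

run = {"search_time_all": [1.2, 7.9, 30.4]}
assert add_first_run_search_time(run)["first_run_search_time"] == 1.2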
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue425/sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites, configs from downward.reports.compare import CompareConfigsReport import common_setup REVISIONS = ["issue425-base", "issue425-v1"] exp = common_setup.IssueExperiment( search_revisions=REVISIONS, configs=configs.default_configs_satisficing(), suite=suites.suite_satisficing_with_ipc11(), limits={"search_time": 300} ) exp.add_absolute_report_step() exp.add_comparison_table_step() def grouped_configs_to_compare(config_nicks): grouped_configs = [] for config_nick in config_nicks: col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS] grouped_configs.append((col_names[0], col_names[1], 'Diff - %s' % config_nick)) return grouped_configs exp.add_report(CompareConfigsReport( compared_configs=grouped_configs_to_compare(configs.configs_satisficing_core()), attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, ), outfile="issue425-sat-compare-core-configs.html" ) def add_first_run_search_time(run): if run.get("search_time_all", []): run["first_run_search_time"] = run["search_time_all"][0] return run exp.add_report(CompareConfigsReport( compared_configs=grouped_configs_to_compare(configs.configs_satisficing_ipc()), attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"], filter=add_first_run_search_time, ), outfile="issue425-sat-compare-portfolio-configs.html" ) exp()
1,671
Python
33.122448
113
0.645721
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue835/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,153
Python
35.955613
82
0.615205
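Taken together, IssueConfig and IssueExperiment reduce an issue experiment to a short driver script. A minimal sketch of such a script follows (the revision tags and the blind-search configuration are hypothetical placeholders; the real scripts in this dump follow the same shape):

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import common_setup
from common_setup import IssueConfig, IssueExperiment

BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue814-base", "issue814-v1"]  # hypothetical revision tags
CONFIGS = [IssueConfig("blind", ["--search", "astar(blind())"])]

exp = IssueExperiment(revisions=REVISIONS, configs=CONFIGS)
exp.add_suite(BENCHMARKS_DIR, common_setup.DEFAULT_OPTIMAL_SUITE)
exp.add_comparison_table_step()           # pairwise tables for all revision pairs
exp.add_scatter_plot_step(relative=True)  # relative scatter plots per attribute
exp.run_steps()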
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue835/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue835-base", "issue835-v1"] CONFIGS = [ IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first', '--overall-time-limit', '5m']), ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_comparison_table_step() exp.run_steps()
1,214
Python
26.613636
106
0.751236
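v1.py runs a single lama-first configuration; further configurations are just additional IssueConfig entries. A sketch of extending CONFIGS (the second alias is illustrative, taken from the standard Fast Downward aliases mentioned elsewhere in these scripts):

from common_setup import IssueConfig

CONFIGS = [
    IssueConfig('lama-first', [],
                driver_options=['--alias', 'lama-first',
                                '--overall-time-limit', '5m']),
    # Illustrative addition: full LAMA 2011 under the same time limit.
    IssueConfig('seq-sat-lama-2011', [],
                driver_options=['--alias', 'seq-sat-lama-2011',
                                '--overall-time-limit', '5m']),
]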
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue835/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("output format 'tex' is not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
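The essence of RelativeScatterPlotReport is in _fill_categories: each task contributes the point (val1, val2/val1), and the y-limits are then widened so the range is symmetric around 1. A self-contained sketch of that arithmetic on made-up values:

# Made-up (val1, val2) pairs for one attribute, e.g. total_time.
pairs = [(10.0, 5.0), (2.0, 8.0), (100.0, 100.0)]
points = [(v1, v2 / float(v1)) for v1, v2 in pairs]

ys = [y for _, y in points]
ylim_bottom = min([2.0] + ys)   # same start values as _fill_categories
ylim_top = max([0.5] + ys)
if ylim_bottom < 1:             # center the y-range around 1
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))

print(points)                   # [(10.0, 0.5), (2.0, 4.0), (100.0, 1.0)]
print((ylim_bottom, ylim_top))  # (0.25, 4.0)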
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue650/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=None, **kwargs): """ If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(os.path.join(repo, "benchmarks"), suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,481
Python
33.963585
83
0.594904
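Every (revision, config) pair becomes one registered algorithm whose name comes from get_algo_nick, so reports can pair "<rev1>-<nick>" against "<rev2>-<nick>". A quick sketch of the names produced, using the revisions and config nicks from v1.py below:

def get_algo_nick(revision, config_nick):
    return "{revision}-{config_nick}".format(**locals())

for rev in ["issue650-base", "issue650-v1"]:
    for nick in ["cegar-landmarks-10k", "cegar-landmarks-goals-900s"]:
        print(get_algo_nick(rev, nick))
# issue650-base-cegar-landmarks-10k
# issue650-base-cegar-landmarks-goals-900s
# issue650-v1-cegar-landmarks-10k
# issue650-v1-cegar-landmarks-goals-900s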
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue650/suites.py
# Benchmark suites from the Fast Downward benchmark collection. def suite_alternative_formulations(): return ['airport-adl', 'no-mprime', 'no-mystery'] def suite_ipc98_to_ipc04_adl(): return [ 'assembly', 'miconic-fulladl', 'miconic-simpleadl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'schedule', ] def suite_ipc98_to_ipc04_strips(): return [ 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', 'satellite', 'zenotravel', ] def suite_ipc98_to_ipc04(): # All IPC1-4 domains, including the trivial Movie. return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) def suite_ipc06_adl(): return [ 'openstacks', 'pathways', 'trucks', ] def suite_ipc06_strips_compilations(): return [ 'openstacks-strips', 'pathways-noneg', 'trucks-strips', ] def suite_ipc06_strips(): return [ 'pipesworld-tankage', 'rovers', 'storage', 'tpp', ] def suite_ipc06(): return sorted(suite_ipc06_adl() + suite_ipc06_strips()) def suite_ipc08_common_strips(): return [ 'parcprinter-08-strips', 'pegsol-08-strips', 'scanalyzer-08-strips', ] def suite_ipc08_opt_adl(): return ['openstacks-opt08-adl'] def suite_ipc08_opt_strips(): return sorted(suite_ipc08_common_strips() + [ 'elevators-opt08-strips', 'openstacks-opt08-strips', 'sokoban-opt08-strips', 'transport-opt08-strips', 'woodworking-opt08-strips', ]) def suite_ipc08_opt(): return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) def suite_ipc08_sat_adl(): return ['openstacks-sat08-adl'] def suite_ipc08_sat_strips(): return sorted(suite_ipc08_common_strips() + [ # Note: cyber-security is missing. 'elevators-sat08-strips', 'openstacks-sat08-strips', 'sokoban-sat08-strips', 'transport-sat08-strips', 'woodworking-sat08-strips', ]) def suite_ipc08_sat(): return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) def suite_ipc08(): return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) def suite_ipc11_opt(): return [ 'barman-opt11-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'nomystery-opt11-strips', 'openstacks-opt11-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'pegsol-opt11-strips', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'tidybot-opt11-strips', 'transport-opt11-strips', 'visitall-opt11-strips', 'woodworking-opt11-strips', ] def suite_ipc11_sat(): return [ 'barman-sat11-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'nomystery-sat11-strips', 'openstacks-sat11-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'pegsol-sat11-strips', 'scanalyzer-sat11-strips', 'sokoban-sat11-strips', 'tidybot-sat11-strips', 'transport-sat11-strips', 'visitall-sat11-strips', 'woodworking-sat11-strips', ] def suite_ipc11(): return sorted(suite_ipc11_opt() + suite_ipc11_sat()) def suite_ipc14_agl_adl(): return [ 'cavediving-agl14-adl', 'citycar-agl14-adl', 'maintenance-agl14-adl', ] def suite_ipc14_agl_strips(): return [ 'barman-agl14-strips', 'childsnack-agl14-strips', 'floortile-agl14-strips', 'ged-agl14-strips', 'hiking-agl14-strips', 'openstacks-agl14-strips', 'parking-agl14-strips', 'tetris-agl14-strips', 'thoughtful-agl14-strips', 'transport-agl14-strips', 'visitall-agl14-strips', ] def suite_ipc14_agl(): return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) def suite_ipc14_mco_adl(): return [ 'cavediving-mco14-adl', 'citycar-mco14-adl', 'maintenance-mco14-adl', ] def suite_ipc14_mco_strips(): return [ 'barman-mco14-strips', 
'childsnack-mco14-strips', 'floortile-mco14-strips', 'ged-mco14-strips', 'hiking-mco14-strips', 'openstacks-mco14-strips', 'parking-mco14-strips', 'tetris-mco14-strips', 'thoughtful-mco14-strips', 'transport-mco14-strips', 'visitall-mco14-strips', ] def suite_ipc14_mco(): return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) def suite_ipc14_opt_adl(): return [ 'cavediving-opt14-adl', 'citycar-opt14-adl', 'maintenance-opt14-adl', ] def suite_ipc14_opt_strips(): return [ 'barman-opt14-strips', 'childsnack-opt14-strips', 'floortile-opt14-strips', 'ged-opt14-strips', 'hiking-opt14-strips', 'openstacks-opt14-strips', 'parking-opt14-strips', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips', 'visitall-opt14-strips', ] def suite_ipc14_opt(): return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) def suite_ipc14_sat_adl(): return [ 'cavediving-sat14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_sat_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_sat(): return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) def suite_ipc14(): return sorted( suite_ipc14_agl() + suite_ipc14_mco() + suite_ipc14_opt() + suite_ipc14_sat()) def suite_unsolvable(): # TODO: Add other unsolvable problems (Miconic-FullADL). # TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl' # if the extra-domains branch is merged. return sorted( ['mystery:prob%02d.pddl' % index for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) def suite_optimal_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_opt_adl()) def suite_optimal_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + suite_ipc11_opt()) def suite_optimal(): return sorted(suite_optimal_adl() + suite_optimal_strips()) def suite_satisficing_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_sat_adl()) def suite_satisficing_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + suite_ipc11_sat()) def suite_satisficing(): return sorted(suite_satisficing_adl() + suite_satisficing_strips()) def suite_all(): return sorted( suite_ipc98_to_ipc04() + suite_ipc06() + suite_ipc06_strips_compilations() + suite_ipc08() + suite_ipc11() + suite_alternative_formulations())
7,695
Python
23.35443
77
0.596231
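Because every suite helper returns a plain list of domain names, composing benchmark sets is ordinary list algebra, with duplicates from overlapping tracks removed via sorted(set(...)), exactly as suite_ipc08() does. A usage sketch (the custom filtering is illustrative):

import suites

# Union of the IPC'08 tracks, deduplicated like suite_ipc08().
ipc08 = sorted(set(suites.suite_ipc08_opt() + suites.suite_ipc08_sat()))

# Illustrative custom suite: IPC'11 optimal domains minus parking.
custom = [d for d in suites.suite_ipc11_opt() if not d.startswith("parking")]
print(len(custom))  # 13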
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue650/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport import suites configs = [ IssueConfig( "cegar-landmarks-10k", ["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]), IssueConfig( "cegar-landmarks-goals-900s", ["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"]), ] revisions = ["issue650-base", "issue650-v1"] exp = IssueExperiment( revisions=revisions, configs=configs, suite=suites.suite_optimal_strips(), test_suite=["depot:pfile1"], email="[email protected]", ) exp.add_comparison_table_step() for attribute in ["memory", "total_time"]: for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) ) exp()
1,151
Python
27.09756
86
0.618593
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue650/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter(axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute differs between two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("output format 'tex' is not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
Python
35.654205
84
0.597042
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue650/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport import suites configs = [ IssueConfig( "cegar-landmarks-10k", ["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]), IssueConfig( "cegar-landmarks-goals-900s-debug", ["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"], build_options=["--debug"], driver_options=["--debug"], ), ] revisions = ["issue650-base", "issue650-v2"] exp = IssueExperiment( revisions=revisions, configs=configs, suite=suites.suite_optimal_strips(), test_suite=["depot:pfile1"], email="[email protected]", ) exp.add_comparison_table_step() for attribute in ["memory", "total_time"]: for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) ) exp()
1,238
Python
27.15909
86
0.607431
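v2.py above illustrates the convention for benchmarking a debug binary: --debug goes into build_options (compile the debug build) and into driver_options (make the driver select that build at run time). In isolation, with a hypothetical blind-search config for contrast:

from common_setup import IssueConfig

SEARCH = ["--search", "astar(blind())"]  # hypothetical search, for illustration
CONFIGS = [
    IssueConfig("blind-release", SEARCH),
    IssueConfig("blind-debug", SEARCH,
                build_options=["--debug"],    # compile the debug build
                driver_options=["--debug"]),  # run that build
]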
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue862/v5.py
#! /usr/bin/env python2 # -*- coding: utf-8 -*- from collections import defaultdict import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab import tools from downward.reports.compare import ComparativeReport from downward.reports import PlanningReport import common_setup from common_setup import IssueConfig, IssueExperiment EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue862-base", "issue862-v1", "issue862-v4", "issue862-v5"] CONFIGS = [ IssueConfig( "translate-only", [], driver_options=["--translate"]) ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) # This was generated by running "./suites.py all" in the benchmarks # repository. SUITE = [ 'agricola-opt18-strips', 'agricola-sat18-strips', 'airport', 'airport-adl', 'assembly', 'barman-mco14-strips', 'barman-opt11-strips', 'barman-opt14-strips', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-opt18-adl', 'caldera-sat18-adl', 'caldera-split-opt18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-opt14-strips', 'childsnack-sat14-strips', 'citycar-opt14-adl', 'citycar-sat14-adl', 'data-network-opt18-strips', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-opt11-strips', 'floortile-opt14-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-opt14-strips', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-agl14-strips', 'hiking-opt14-strips', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-opt14-adl', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'no-mprime', 'no-mystery', 'nomystery-opt11-strips', 'nomystery-sat11-strips', 'nurikabe-opt18-adl', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-agl14-strips', 'openstacks-opt08-adl', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-opt18-strips', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-opt18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parcprinter-sat11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pegsol-sat11-strips', 'petri-net-alignment-opt18-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-opt18-adl', 'settlers-sat18-adl', 'snake-opt18-strips', 'snake-sat18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-opt18-strips', 'spider-sat18-strips', 'storage', 'termes-opt18-strips', 'termes-sat18-strips', 'tetris-opt14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'thoughtful-sat14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 
'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel', ] if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser("translator_additional_parser.py") del exp.commands['remove-output-sas'] class TranslatorDiffReport(PlanningReport): def get_cell(self, run): return ";".join(run.get(attr) for attr in self.attributes) def get_text(self): lines = [] for runs in self.problem_runs.values(): hashes = set([r.get("translator_output_sas_hash") for r in runs]) if len(hashes) > 1 or None in hashes: lines.append(";".join([self.get_cell(r) for r in runs])) return "\n".join(lines) class SameValueFilters(object): """Ignore runs for a task where all algorithms have the same value.""" def __init__(self, attribute): self._attribute = attribute self._tasks_to_values = defaultdict(list) def _get_task(self, run): return (run['domain'], run['problem']) def store_values(self, run): value = run.get(self._attribute) self._tasks_to_values[self._get_task(run)].append(value) # Don't filter this run, yet. return True def filter_tasks_with_equal_values(self, run): values = self._tasks_to_values[self._get_task(run)] return len(set(values)) != 1 exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_parse_again_step() exp.add_fetcher(name='fetch') ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"] exp.add_absolute_report_step( outfile=os.path.join(exp.eval_dir, "{EXPNAME}.html".format(**locals())), attributes=ATTRIBUTES) same_value_filters = SameValueFilters("translator_output_sas_hash") exp.add_absolute_report_step( outfile=os.path.join(exp.eval_dir, "{EXPNAME}-filtered.html".format(**locals())), attributes=ATTRIBUTES, filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) exp.add_report(TranslatorDiffReport( attributes=["domain", "problem", "algorithm", "run_dir"] ), outfile="different_output_sas.csv" ) exp.run_steps()
6,873
Python
27.404959
94
0.657646
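SameValueFilters relies on each filter in the filter list being applied to all runs before the next one: store_values records every run's hash while filtering nothing, and filter_tasks_with_equal_values then keeps only tasks whose algorithms disagree. A sketch of the two passes on toy runs, assuming SameValueFilters as defined in v5.py above:

# Toy runs: task p1 agrees across algorithms, task p2 does not.
runs = [
    {"domain": "grid", "problem": "p1", "translator_output_sas_hash": "aaa"},
    {"domain": "grid", "problem": "p1", "translator_output_sas_hash": "aaa"},
    {"domain": "grid", "problem": "p2", "translator_output_sas_hash": "aaa"},
    {"domain": "grid", "problem": "p2", "translator_output_sas_hash": "bbb"},
]

f = SameValueFilters("translator_output_sas_hash")
runs = [r for r in runs if f.store_values(r)]                    # pass 1: record, keep all
runs = [r for r in runs if f.filter_tasks_with_equal_values(r)]  # pass 2: drop agreeing tasks
print([r["problem"] for r in runs])  # ['p2', 'p2']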
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue862/v5-planner.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue862-base", "issue862-v5"] BUILDS = ["release32"] CONFIG_DICT = { "lazy-greedy-{h}".format(**locals()): [ "--evaluator", "h={h}()".format(**locals()), "--search", "lazy_greedy([h], preferred=[h])"] for h in ["hmax", "add", "ff", "cg", "cea"] } CONFIG_DICT["lama-first"] = [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""] CONFIG_DICT["blind"] = ["--search", "astar(blind())"] CONFIGS = [ IssueConfig( "-".join([config_nick, build]), config, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "30m"]) for build in BUILDS for config_nick, config in CONFIG_DICT.items() ] SUITE = [ "airport-adl", "assembly", "miconic-fulladl", "psr-large", "psr-middle", ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() exp.add_comparison_table_step() exp.run_steps()
2,260
Python
27.620253
103
0.657965
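The dict comprehension in v5-planner.py expands to one lazy-greedy configuration per heuristic, and crossing the nicks with BUILDS yields the final config names. The same naming logic, standalone:

CONFIG_DICT = {
    "lazy-greedy-{h}".format(h=h): [
        "--evaluator", "h={h}()".format(h=h),
        "--search", "lazy_greedy([h], preferred=[h])"]
    for h in ["hmax", "add", "ff", "cg", "cea"]
}
for build in ["release32"]:
    for nick in sorted(CONFIG_DICT):
        print("-".join([nick, build]))
# lazy-greedy-add-release32, lazy-greedy-cea-release32, lazy-greedy-cg-release32, ...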
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue862/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, outfile=None, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = outfile or os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-' + os.path.basename(outfile), subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,824
Python
36.531645
90
0.618591
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue862/translator_additional_parser.py
#!/usr/bin/env python import hashlib from lab.parser import Parser def add_hash_value(content, props): props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest() parser = Parser() parser.add_function(add_hash_value, file="output.sas") parser.parse()
273
Python
20.076922
77
0.739927
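The parser boils each translator run down to one comparable value: the SHA-512 digest of output.sas, so two runs produced identical translations exactly when their digests match. The same check by hand (file paths are hypothetical):

import hashlib

def sas_hash(path):
    # Digest of the raw bytes of a translator output file.
    with open(path, "rb") as f:
        return hashlib.sha512(f.read()).hexdigest()

# Hypothetical run directories from two revisions on the same task.
print(sas_hash("runs/rev-a/output.sas") == sas_hash("runs/rev-b/output.sas"))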
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue862/v1.py
#! /usr/bin/env python2 # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab import tools from downward.reports.compare import ComparativeReport from downward.reports import PlanningReport import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue862-base", "issue862-v1", "issue862-v2", "issue862-v3"] CONFIGS = [ IssueConfig( "translate-only", [], driver_options=["--translate"]) ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) # This was generated by running "./suites.py all" in the benchmarks # repository. I don't know if there is a better way of doing # this. SUITE = [ 'agricola-opt18-strips', 'agricola-sat18-strips', 'airport', 'airport-adl', 'assembly', 'barman-mco14-strips', 'barman-opt11-strips', 'barman-opt14-strips', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-opt18-adl', 'caldera-sat18-adl', 'caldera-split-opt18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-opt14-strips', 'childsnack-sat14-strips', 'citycar-opt14-adl', 'citycar-sat14-adl', 'data-network-opt18-strips', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-opt11-strips', 'floortile-opt14-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-opt14-strips', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-agl14-strips', 'hiking-opt14-strips', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-opt14-adl', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'no-mprime', 'no-mystery', 'nomystery-opt11-strips', 'nomystery-sat11-strips', 'nurikabe-opt18-adl', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-agl14-strips', 'openstacks-opt08-adl', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-opt18-strips', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-opt18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parcprinter-sat11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pegsol-sat11-strips', 'petri-net-alignment-opt18-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-opt18-adl', 'settlers-sat18-adl', 'snake-opt18-strips', 'snake-sat18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-opt18-strips', 'spider-sat18-strips', 'storage', 'termes-opt18-strips', 'termes-sat18-strips', 'tetris-opt14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'thoughtful-sat14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'transport-sat08-strips', 
'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel', ] if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_resource("translator_additional_parser", "translator_additional_parser.py", dest="translator_additional_parser.py") del exp.commands['remove-output-sas'] exp.add_command("translator_additional_parser", ["{translator_additional_parser}"]) class TranslatorDiffReport(PlanningReport): def get_cell(self, run): return ";".join(run.get(attr) for attr in self.attributes) def get_text(self): lines = [] for runs in self.problem_runs.values(): hashes = set([r.get("translator_output_sas_hash") for r in runs]) if len(hashes) > 1 or None in hashes: lines.append(";".join([self.get_cell(r) for r in runs])) return "\n".join(lines) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_report(TranslatorDiffReport( attributes=["domain", "problem", "algorithm", "run_dir"] ), outfile="different_output_sas.csv" ) exp.run_steps()
5,753
Python
26.141509
77
0.649053
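The experiment above expects each run to carry a "translator_output_sas_hash" attribute, which must come from the bundled translator_additional_parser.py; that parser is not part of this dump. Below is a minimal sketch of what such a parser could look like, assuming it simply fingerprints output.sas with hashlib; the function name and choice of hash function are assumptions, and only the attribute name is taken from TranslatorDiffReport above.

#! /usr/bin/env python2
# Hypothetical sketch of translator_additional_parser.py, not the original file.
import hashlib

from lab.parser import Parser


def add_translator_output_sas_hash(content, props):
    # Fingerprint the translator output so runs can be diffed across revisions.
    props["translator_output_sas_hash"] = hashlib.sha512(content).hexdigest()


parser = Parser()
parser.add_function(add_translator_output_sas_hash, file="output.sas")
parser.parse()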
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue862/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("output format 'tex' is not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
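To make the coordinate convention of RelativeScatterPlotReport above concrete: for every task solved by both algorithms, x is the attribute value of algorithm 1 and y is the value of algorithm 2 divided by that of algorithm 1. A tiny self-contained illustration with invented numbers:

# Illustration only (made-up values), mirroring _fill_categories above.
pairs = [(10.0, 20.0), (8.0, 4.0), (5.0, 5.0)]  # (val1, val2) per task
coords = [(val1, val2 / float(val1)) for val1, val2 in pairs]
print coords  # [(10.0, 2.0), (8.0, 0.5), (5.0, 1.0)]
# y > 1: algorithm 2 needed more of the resource; y < 1: less; y = 1: equal.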
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v8-blind.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v7-base", "issue693-v7", "issue693-v8"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step() exp.run_steps()
1,219
Python
24.957446
80
0.707137
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the ComparativeReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,198
Python
35.689922
79
0.614241
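common_setup.py is a library, so the record above shows no entry point. The following driver sketch, modeled directly on the surrounding v*-*.py scripts, shows how its pieces fit together; the revision names are placeholders, not files from the repository.

#! /usr/bin/env python
# Minimal usage sketch for common_setup; revision names are placeholders.
import os

import common_setup
from common_setup import IssueConfig, IssueExperiment

BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]

exp = IssueExperiment(
    revisions=["issue693-base", "issue693-v1"],
    configs=[IssueConfig("blind", ["--search", "astar(blind())"])],
)
exp.add_suite(BENCHMARKS_DIR, common_setup.DEFAULT_OPTIMAL_SUITE)
exp.add_absolute_report_step()   # absolute numbers per algorithm
exp.add_comparison_table_step()  # pairwise revision diffs
exp.run_steps()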
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v3-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v2", "issue693-v3"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ("divpot", "astar(diverse_potentials())"), ("lmcut", "astar(lmcut())"), ("cegar", "astar(cegar())"), ("systematic2", "astar(cpdbs(systematic(2)))"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() attributes = [ "coverage", "error", "expansions_until_last_jump", "memory", "score_memory", "total_time", "score_total_time"] # Compare revisions. # lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 # lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) exp.run_steps()
2,171
Python
29.591549
80
0.659143
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v7-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v7-base", "issue693-v7"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ("lmcut", "astar(lmcut())"), ("cegar", "astar(cegar())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step() exp.run_steps()
1,270
Python
24.938775
80
0.694488
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v4-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v4-base", "issue693-v4"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ("divpot", "astar(diverse_potentials())"), ("lmcut", "astar(lmcut())"), ("cegar", "astar(cegar())"), ("systematic2", "astar(cpdbs(systematic(2)))"), ("mas", "astar(merge_and_shrink(" "shrink_strategy=shrink_bisimulation(greedy=false)," "merge_strategy=merge_stateless(" "merge_selector=score_based_filtering(" "scoring_functions=[goal_relevance,dfp,total_order]))," "label_reduction=exact(before_shrinking=true,before_merging=false)," "max_states=50000,threshold_before_merge=1))") ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare revisions. for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) exp.run_steps()
2,393
Python
30.92
80
0.663184
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v6-blind.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v5", "issue693-v6"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build, "--search-time-limit", "1m"]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) # Compare revisions. for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport( algorithm_pairs, attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) for config_nick, search in SEARCHES: algorithms = [ "{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals())] for attribute in ["total_time", "memory"]: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=algorithms, get_category=lambda run1, run2: run1["domain"]), name="issue693-relative-scatter-{config_nick}-{build}-{rev1}-vs-{rev2}-{attribute}".format(**locals())) exp.run_steps()
2,443
Python
32.479452
123
0.615227
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v2-blind.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-base", "issue693-v1", "issue693-v2"] BUILDS = ["release32"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.run_steps()
1,235
Python
24.224489
80
0.708502
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v7-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v7-base", "issue693-v7"] BUILDS = ["release32", "release64"] CONFIGS = [ IssueConfig( "lama-first-{build}".format(**locals()), [], build_options=[build], driver_options=["--build", build, "--alias", "lama-first"]) for build in BUILDS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step() exp.run_steps()
1,138
Python
25.488372
80
0.717926
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("output format 'tex' is not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v5-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v5-base", "issue693-v5"] BUILDS = ["release32"] SEARCHES = [ ("blind", "astar(blind())"), ("divpot", "astar(diverse_potentials())"), ("lmcut", "astar(lmcut())"), ("cegar", "astar(cegar())"), ("systematic2", "astar(cpdbs(systematic(2)))"), ("mas", "astar(merge_and_shrink(" "shrink_strategy=shrink_bisimulation(greedy=false)," "merge_strategy=merge_stateless(" "merge_selector=score_based_filtering(" "scoring_functions=[goal_relevance,dfp,total_order]))," "label_reduction=exact(before_shrinking=true,before_merging=false)," "max_states=50000,threshold_before_merge=1))") ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build, "--search-time-limit", "5m"]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare revisions. for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) exp.run_steps()
2,409
Python
31.133333
80
0.6621
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v3-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v2", "issue693-v3"] BUILDS = ["release32", "release64"] SEARCHES = [ ("ff_lazy", "lazy_greedy(ff())"), ("cg_eager", "eager_greedy(cg())"), ("lm_rhw", "lazy_greedy(lmcount(lm_rhw()))"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() attributes = [ "coverage", "error", "expansions_until_last_jump", "memory", "score_memory", "total_time", "score_total_time"] # Compare revisions. # lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 # lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue693-sat-{rev1}-vs-{rev2}-{build}".format(**locals())) exp.run_steps()
2,105
Python
29.521739
80
0.662708
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/v4-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue693-v4-base", "issue693-v4"] BUILDS = ["release32", "release64"] SEARCHES = [ ("ff_lazy", ["--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"]), ("add_lazy", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), ("ff_eager", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), search, build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] + [ IssueConfig( "lama-first-{build}".format(**locals()), [], build_options=[build], driver_options=["--build", build, "--alias", "lama-first"]) for build in BUILDS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare revisions. for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick in [nick for nick, _ in SEARCHES] + ["lama-first"]] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue693-sat-{rev1}-vs-{rev2}-{build}".format(**locals())) exp.run_steps()
2,295
Python
30.888888
88
0.647059
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/hash-microbenchmark/main.cc
#include <algorithm> #include <ctime> #include <functional> #include <iostream> #include <string> #include <unordered_set> #include "fast_hash.h" #include "hash.h" #include "SpookyV2.h" using namespace std; static void benchmark(const string &desc, int num_calls, const function<void()> &func) { cout << "Running " << desc << " " << num_calls << " times:" << flush; clock_t start = clock(); for (int j = 0; j < num_calls; ++j) func(); clock_t end = clock(); double duration = static_cast<double>(end - start) / CLOCKS_PER_SEC; cout << " " << duration << "s" << endl; } static int scramble(int i) { return (0xdeadbeef * i) ^ 0xfeedcafe; } #define rot32(x, k) (((x) << (k)) | ((x) >> (32 - (k)))) #define mix32(a, b, c) \ { \ a -= c; a ^= rot32(c, 4); c += b; \ b -= a; b ^= rot32(a, 6); a += c; \ c -= b; c ^= rot32(b, 8); b += a; \ a -= c; a ^= rot32(c, 16); c += b; \ b -= a; b ^= rot32(a, 19); a += c; \ c -= b; c ^= rot32(b, 4); b += a; \ } #define final32(a, b, c) \ { \ c ^= b; c -= rot32(b, 14); \ a ^= c; a -= rot32(c, 11); \ b ^= a; b -= rot32(a, 25); \ c ^= b; c -= rot32(b, 16); \ a ^= c; a -= rot32(c, 4); \ b ^= a; b -= rot32(a, 14); \ c ^= b; c -= rot32(b, 24); \ } inline unsigned int hash_unsigned_int_sequence( const int *k, unsigned int length, unsigned int initval) { unsigned int a, b, c; // Set up the internal state. a = b = c = 0xdeadbeef + (length << 2) + initval; // Handle most of the key. while (length > 3) { a += k[0]; b += k[1]; c += k[2]; mix32(a, b, c); length -= 3; k += 3; } // Handle the last 3 unsigned ints. All case statements fall through. switch (length) { case 3: c += k[2]; case 2: b += k[1]; case 1: a += k[0]; final32(a, b, c); // case 0: nothing left to add. case 0: break; } return c; } struct BurtleBurtleHash { std::size_t operator()(const std::vector<int> &vec) const { return hash_unsigned_int_sequence(vec.data(), vec.size(), 2016); } }; using BurtleBurtleHashSet = std::unordered_set<vector<int>, BurtleBurtleHash>; struct HashWordHash { std::size_t operator()(const std::vector<int> &vec) const { utils::HashState hash_state; hash_state.feed_ints(vec.data(), vec.size()); return hash_state.get_hash64(); } }; struct SpookyV2Hash { std::size_t operator()(const std::vector<int> &vec) const { return SpookyHash::Hash64(vec.data(), vec.size() * 4, 2016); } }; struct SpookyV2HashInt { std::size_t operator()(int i) const { return SpookyHash::Hash64(&i, sizeof(int), 2016); } }; int main(int, char **) { const int REPETITIONS = 2; const int NUM_CALLS = 100000; const int NUM_INSERTIONS = 100; for (int i = 0; i < REPETITIONS; ++i) { benchmark("nothing", NUM_CALLS, [] () {}); cout << endl; benchmark("insert int with BoostHash", NUM_CALLS, [&]() { unordered_set<int> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { s.insert(scramble(i)); } }); benchmark("insert int with BoostHashFeed", NUM_CALLS, [&]() { fast_hash::HashSet<int> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { s.insert(scramble(i)); } }); benchmark("insert int with BurtleFeed", NUM_CALLS, [&]() { utils::HashSet<int> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { s.insert(scramble(i)); } }); benchmark("insert int with SpookyHash", NUM_CALLS, [&]() { std::unordered_set<int, SpookyV2HashInt> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { s.insert(scramble(i)); } }); cout << endl; benchmark("insert pair<int, int> with BoostHash", NUM_CALLS, [&]() { unordered_set<pair<int, int>> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { s.insert(make_pair(scramble(i), scramble(i + 1))); } }); benchmark("insert pair<int, int> with BoostHashFeed", 
NUM_CALLS, [&]() { fast_hash::HashSet<pair<int, int>> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { s.insert(make_pair(scramble(i), scramble(i + 1))); } }); benchmark("insert pair<int, int> with BurtleFeed", NUM_CALLS, [&]() { utils::HashSet<pair<int, int>> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { s.insert(make_pair(scramble(i), scramble(i + 1))); } }); cout << endl; for (int length : {1, 10, 100} ) { benchmark( "insert vector<int> of size " + to_string(length) + " with BoostHash", NUM_CALLS, [&]() { unordered_set<vector<int>> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { vector<int> v; v.reserve(length); for (int j = 0; j < length; ++j) { v.push_back(scramble(NUM_INSERTIONS * length + j)); } s.insert(v); } }); benchmark( "insert vector<int> of size " + to_string(length) + " with BoostHashFeed", NUM_CALLS, [&]() { fast_hash::HashSet<vector<int>> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { vector<int> v; v.reserve(length); for (int j = 0; j < length; ++j) { v.push_back(scramble(NUM_INSERTIONS * length + j)); } s.insert(v); } }); benchmark( "insert vector<int> of size " + to_string(length) + " with BurtleVector", NUM_CALLS, [&]() { BurtleBurtleHashSet s; for (int i = 0; i < NUM_INSERTIONS; ++i) { vector<int> v; v.reserve(length); for (int j = 0; j < length; ++j) { v.push_back(scramble(NUM_INSERTIONS * length + j)); } s.insert(v); } }); benchmark( "insert vector<int> of size " + to_string(length) + " with BurtleFeed", NUM_CALLS, [&]() { utils::HashSet<vector<int>> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { vector<int> v; v.reserve(length); for (int j = 0; j < length; ++j) { v.push_back(scramble(NUM_INSERTIONS * length + j)); } s.insert(v); } }); benchmark( "insert vector<int> of size " + to_string(length) + " with BurtleFeedVector", NUM_CALLS, [&]() { std::unordered_set<vector<int>, HashWordHash> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { vector<int> v; v.reserve(length); for (int j = 0; j < length; ++j) { v.push_back(scramble(NUM_INSERTIONS * length + j)); } s.insert(v); } }); benchmark( "insert vector<int> of size " + to_string(length) + " with SpookyHash", NUM_CALLS, [&]() { std::unordered_set<vector<int>, SpookyV2Hash> s; for (int i = 0; i < NUM_INSERTIONS; ++i) { vector<int> v; v.reserve(length); for (int j = 0; j < length; ++j) { v.push_back(scramble(NUM_INSERTIONS * length + j)); } s.insert(v); } }); cout << endl; } cout << endl; } return 0; }
9,166
C++
32.334545
79
0.397883
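The benchmark() helper in main.cc wraps a callable in a clock()-based CPU-time measurement and repeats it NUM_CALLS times. A rough Python 2 analogue, for illustration only (not part of the benchmark suite):

import time


def benchmark(desc, num_calls, func):
    # Accumulate CPU time over num_calls invocations, like benchmark() in main.cc.
    start = time.clock()
    for _ in xrange(num_calls):
        func()
    print "Running %s %d times: %.3fs" % (desc, num_calls, time.clock() - start)


benchmark("insert 100 ints into a set", 1000, lambda: set(xrange(100)))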
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue693/hash-microbenchmark/hash.h
#ifndef UTILS_HASH_H #define UTILS_HASH_H #include <cassert> #include <cstddef> #include <cstdint> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> namespace utils { /* We provide a family of hash functions that are supposedly higher quality than what is guaranteed by the standard library. Changing a single bit in the input should typically change around half of the bits in the final hash value. The hash functions we previously used turned out to cluster when we tried hash tables with open addressing for state registries. The low-level hash functions are based on lookup3.c by Bob Jenkins, May 2006, public domain. See http://www.burtleburtle.net/bob/c/lookup3.c. To hash an object x, it is represented as a sequence of 32-bit pieces (called the "code" for x, written code(x) in the following) that are "fed" to the main hashing function (implemented in class HashState) one by one. This allows a compositional approach to hashing. For example, the code for a pair p is the concatenation of code(x.first) and code(x.second). A simpler compositional approach to hashing would first hash the components of an object and then combine the hash values, and this is what a previous version of our code did. The approach with an explicit HashState object is stronger because the internal hash state is larger (96 bits) than the final hash value and hence pairs <x, y> and <x', y> where x and x' have the same hash value don't necessarily collide. Another advantage of our approach is that we can use the same overall hashing approach to generate hash values of different types (e.g. 32-bit vs. 64-bit unsigned integers). To extend the hashing mechanism to further classes, provide a template specialization for the "feed" function. This must satisfy the following requirements: A) If x and y are objects of the same type, they should have code(x) = code(y) iff x = y. That is, the code sequence should uniquely describe each logically distinct object. This requirement avoids unnecessary hash collisions. Of course, there will still be "necessary" hash collisions because different code sequences can collide in the low-level hash function. B) To play nicely with composition, we additionally require that feed implements a prefix code, i.e., for objects x != y of the same type, code(x) must not be a prefix of code(y). This requirement makes it much easier to define non-colliding code sequences for composite objects such as pairs via concatenation: if <a, b> != <a', b'>, then code(a) != code(a') and code(b) != code(b') is *not* sufficient for concat(code(a), code(b)) != concat(code(a'), code(b')). However, if we require a prefix code, it *is* sufficient and the resulting code will again be a prefix code. Note that objects "of the same type" is meant as "logical type" rather than C++ type. For example, for objects such as vectors where we expect different-length vectors to be combined in the same containers (= have the same logical type), we include the length of the vector as the first element in the code to ensure the prefix code property. In contrast, for integer arrays encoding states, we *do not* include the length as a prefix because states of different sizes are considered to be different logical types and should not be mixed in the same container, even though they are represented by the same C++ type. */ static_assert(sizeof(unsigned int) == 4, "unsigned int has unexpected size"); /* Circular rotation (http://stackoverflow.com/a/31488147/224132). 
*/ inline unsigned int rotate(unsigned int value, unsigned int offset) { return (value << offset) | (value >> (32 - offset)); } /* Internal class storing the state of the hashing process. It should only be instantiated by functions in this file. */ class HashState { std::uint32_t a, b, c; int pending_values; /* Mix the three 32-bit values bijectively. Any information in (a, b, c) before mix() is still in (a, b, c) after mix(). */ void mix() { a -= c; a ^= rotate(c, 4); c += b; b -= a; b ^= rotate(a, 6); a += c; c -= b; c ^= rotate(b, 8); b += a; a -= c; a ^= rotate(c, 16); c += b; b -= a; b ^= rotate(a, 19); a += c; c -= b; c ^= rotate(b, 4); b += a; } /* Final mixing of the three 32-bit values (a, b, c) into c. Triples of (a, b, c) differing in only a few bits will usually produce values of c that look totally different. */ void final_mix() { c ^= b; c -= rotate(b, 14); a ^= c; a -= rotate(c, 11); b ^= a; b -= rotate(a, 25); c ^= b; c -= rotate(b, 16); a ^= c; a -= rotate(c, 4); b ^= a; b -= rotate(a, 14); c ^= b; c -= rotate(b, 24); } public: HashState() : a(0xdeadbeef), b(a), c(a), pending_values(0) { } void feed_ints(const int *values, int length) { // Handle most of the key. while (length > 3) { a += values[0]; b += values[1]; c += values[2]; mix(); length -= 3; values += 3; } // Handle the last 3 unsigned ints. All case statements fall through. switch (length) { case 3: c += values[2]; case 2: b += values[1]; case 1: a += values[0]; final_mix(); // case 0: nothing left to add. case 0: break; } } void feed(std::uint32_t value) { assert(pending_values != -1); if (pending_values == 3) { mix(); pending_values = 0; } if (pending_values == 0) { a += value; ++pending_values; } else if (pending_values == 1) { b += value; ++pending_values; } else if (pending_values == 2) { c += value; ++pending_values; } } std::uint32_t get_hash32() { assert(pending_values != -1); if (pending_values) { /* pending_values == 0 can only hold if we never called feed(), i.e., if we are hashing an empty sequence. In this case we don't call final_mix for compatibility with the original hash function by Jenkins. */ final_mix(); } pending_values = -1; return c; } std::uint64_t get_hash64() { assert(pending_values != -1); if (pending_values) { // See comment for get_hash32. final_mix(); } pending_values = -1; return (static_cast<std::uint64_t>(b) << 32) | c; } }; /* These functions add a new object to an existing HashState object. To add hashing support for a user type X, provide an override for utils::feed(HashState &hash_state, const X &value). */ static_assert( sizeof(int) == sizeof(std::uint32_t), "int and uint32_t have different sizes"); inline void feed(HashState &hash_state, int value) { hash_state.feed(static_cast<std::uint32_t>(value)); } static_assert( sizeof(unsigned int) == sizeof(std::uint32_t), "unsigned int and uint32_t have different sizes"); inline void feed(HashState &hash_state, unsigned int value) { hash_state.feed(static_cast<std::uint32_t>(value)); } inline void feed(HashState &hash_state, std::uint64_t value) { hash_state.feed(static_cast<std::uint32_t>(value)); value >>= 32; hash_state.feed(static_cast<std::uint32_t>(value)); } template<typename T> void feed(HashState &hash_state, const T *p) { // This is wasteful in 32-bit mode, but we plan to discontinue 32-bit compiles anyway. 
feed(hash_state, reinterpret_cast<std::uint64_t>(p)); } template<typename T1, typename T2> void feed(HashState &hash_state, const std::pair<T1, T2> &p) { feed(hash_state, p.first); feed(hash_state, p.second); } template<typename T> void feed(HashState &hash_state, const std::vector<T> &vec) { /* Feed vector size to ensure that no two different vectors of the same type have the same code prefix. */ feed(hash_state, vec.size()); for (const T &item : vec) { feed(hash_state, item); } } /* Public hash functions. get_hash() is used internally by the HashMap and HashSet classes below. In more exotic use cases, such as implementing a custom hash table, you can also use `get_hash32()`, `get_hash64()` and `get_hash()` directly. */ template<typename T> std::uint32_t get_hash32(const T &value) { HashState hash_state; feed(hash_state, value); return hash_state.get_hash32(); } template<typename T> std::uint64_t get_hash64(const T &value) { HashState hash_state; feed(hash_state, value); return hash_state.get_hash64(); } template<typename T> std::size_t get_hash(const T &value) { return static_cast<std::size_t>(get_hash64(value)); } // This struct should only be used by HashMap and HashSet below. template<typename T> struct Hash { std::size_t operator()(const T &val) const { return get_hash(val); } }; /* Aliases for hash sets and hash maps in user code. All user code should use utils::UnorderedSet and utils::UnorderedMap instead of std::unordered_set and std::unordered_map. To hash types that are not supported out of the box, implement utils::feed. */ template<typename T1, typename T2> using HashMap = std::unordered_map<T1, T2, Hash<T1>>; template<typename T> using HashSet = std::unordered_set<T, Hash<T>>; /* Transitional aliases and functions */ template<typename T1, typename T2> using UnorderedMap = std::unordered_map<T1, T2>; template<typename T> using UnorderedSet = std::unordered_set<T>; template<typename T> inline void hash_combine(size_t &hash, const T &value) { std::hash<T> hasher; /* The combination of hash values is based on issue 6.18 in http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2005/n1756.pdf. Boost combines hash values in the same way. */ hash ^= hasher(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2); } template<typename Sequence> size_t hash_sequence(const Sequence &data, size_t length) { size_t hash = 0; for (size_t i = 0; i < length; ++i) { hash_combine(hash, data[i]); } return hash; } } namespace std { template<typename T> struct hash<std::vector<T>> { size_t operator()(const std::vector<T> &vec) const { return utils::hash_sequence(vec, vec.size()); } }; template<typename TA, typename TB> struct hash<std::pair<TA, TB>> { size_t operator()(const std::pair<TA, TB> &pair) const { size_t hash = 0; utils::hash_combine(hash, pair.first); utils::hash_combine(hash, pair.second); return hash; } }; } #endif
11,173
C
28.877005
90
0.621319
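The comment block in hash.h motivates its prefix-code requirement: feeding a vector's length before its elements guarantees that code(x) is never a proper prefix of code(y) for distinct x and y, so concatenated codes of composite objects stay collision-free at the code level. A small Python sketch of that encoding (illustration only; the real code feeds 32-bit words into HashState):

def code(obj):
    # Ints encode as themselves; lists encode as their length followed by
    # the codes of their items, which yields a prefix code.
    if isinstance(obj, int):
        return [obj]
    if isinstance(obj, list):
        words = [len(obj)]
        for item in obj:
            words.extend(code(item))
        return words
    raise TypeError("unsupported type")

print code([1, 2])     # [2, 1, 2]
print code([1, 2, 3])  # [3, 1, 2, 3] -- already differs in the first word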