Dataset columns (name, type, observed range):
  file_path           string   (lengths 20 to 207)
  content             string   (lengths 5 to 3.85M)
  size                int64    (5 to 3.85M)
  lang                string   (9 distinct values)
  avg_line_length     float64  (1.33 to 100)
  max_line_length     int64    (4 to 993)
  alphanum_fraction   float64  (0.26 to 0.93)
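The numeric columns can be recomputed from each row's content field. The sketch below is an illustration added for clarity, not part of the dump; the exact definitions (character versus byte counts, treatment of trailing newlines) are assumptions.

def file_stats(content):
    """Recompute a row's derived columns from its raw file content.

    Assumed definitions: size is the character count, avg_line_length is
    size divided by the number of lines, and alphanum_fraction is the
    share of alphanumeric characters.
    """
    lines = content.splitlines() or [""]
    size = len(content)
    return {
        "size": size,
        "avg_line_length": size / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / size if size else 0.0,
    }

# e.g. file_stats("x = 1\nprint(x)\n") gives size 15, avg_line_length 7.5,
# max_line_length 8, alphanum_fraction of roughly 0.53.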
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue595/issue595-v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from main import main

main(revisions=["issue595-base", "issue595-v1"])
size: 120 | lang: Python | avg_line_length: 16.285712 | max_line_length: 48 | alphanum_fraction: 0.65
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v7-sat-eager.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.suites

import common_setup
import configs

NICKS = [
    'eager_greedy_alt_ff_cg', 'eager_greedy_ff', 'eager_greedy_ff_no_pref',
    'eager_pareto_ff', 'eager_wa3_cg'
]
CONFIGS = {}
for nick in NICKS:
    CONFIGS[nick] = configs.default_configs_satisficing(ipc=False, extended=True)[nick]

print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_satisficing_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v7-base", "issue77-v7"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 697 | lang: Python | avg_line_length: 20.812499 | max_line_length: 87 | alphanum_fraction: 0.695839
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v4-sat-eager.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.suites

import common_setup
import configs

CONFIGS = configs.default_configs_satisficing(ipc=False, extended=False)

# The following lines remove some configs that we don't currently
# support.
DISABLED = [
]
for key, value in list(CONFIGS.items()):
    if key in DISABLED or key.startswith(("lazy", "iterated", "ehc")):
        del CONFIGS[key]
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_satisficing_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v3", "issue77-v4"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 750 | lang: Python | avg_line_length: 20.457142 | max_line_length: 72 | alphanum_fraction: 0.706667
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport from relative_scatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. 
It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None, relative=False): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) Use `relative=True` to create relative scatter plots. 
:: exp.add_scatter_plot_step(relative=True) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES if relative: scatter_plot_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "relative-scatter") else: scatter_plot_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = scatter_plot_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
size: 13,172 | lang: Python | avg_line_length: 35.090411 | max_line_length: 79 | alphanum_fraction: 0.611676
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/relative_scatter.py
from collections import defaultdict from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport EPSILON = 0.01 def get_relative_change(val1, val2): """ >>> get_relative_change(10, 0) -999.0 >>> get_relative_change(10, 1) -9.0 >>> get_relative_change(10, 5) -1.0 >>> get_relative_change(10, 10) 0.0 >>> get_relative_change(10, 15) 0.5 >>> get_relative_change(10, 20) 1.0 >>> get_relative_change(10, 100) 9.0 >>> get_relative_change(0, 10) 999.0 >>> get_relative_change(0, 0) 0.0 """ assert val1 >= 0, val1 assert val2 >= 0, val2 if val1 == 0: val1 = EPSILON if val2 == 0: val2 = EPSILON if val1 > val2: return 1 - val1 / float(val2) return val2 / float(val1) - 1 class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a specific attribute in two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. If the value for config 1 is v1 and the value for config 2 is v2, the plot contains the point (v1, 1 - v1/v2) if v1 > v2 and the point (v1, v2/v1 - 1) otherwise. """ def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples. categories = defaultdict(list) self.ylim_bottom = 0 self.ylim_top = 0 for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 >= 0, (domain, problem, self.configs[0], val1) assert val2 >= 0, (domain, problem, self.configs[1], val2) x = val1 y = get_relative_change(val1, val2) categories[category].append((x, y)) self.ylim_bottom = min(self.ylim_bottom, y) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom *= 1.1 self.ylim_top *= 1.1 return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'linear')
size: 2,744 | lang: Python | avg_line_length: 30.918604 | max_line_length: 72 | alphanum_fraction: 0.582362
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-sat2.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import common_setup
import downward.suites

# This experiment only tests the Lama-FF synergy, which sat1 did not
# test because it did not work in the issue77 branch.

CONFIGS = {
    "synergy": [
        "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))",
        "--search", "eager_greedy([hff,hlm],preferred=[hff,hlm])"],
}

SUITE = downward.suites.suite_satisficing_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v3-base", "issue77-v3"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step()

exp()
size: 684 | lang: Python | avg_line_length: 24.370369 | max_line_length: 76 | alphanum_fraction: 0.676901
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-sat1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.configs
import downward.suites

CONFIGS = downward.configs.default_configs_satisficing(extended=True)

# The following lines remove some configs that we don't currently
# support because the respective configurations are commented out
DISABLED = [
    "seq_sat_fdss_1",
    "seq_sat_fdss_2",
    "seq_sat_lama_2011",
]
for key, value in list(CONFIGS.items()):
    if key in DISABLED or key.startswith(("lazy", "iterated", "ehc")):
        del CONFIGS[key]
    else:
        for pos, arg in enumerate(value):
            if ", pathmax=false" in arg:
                # pathmax is gone in this branch
                value[pos] = arg.replace(", pathmax=false", "")
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_satisficing_with_ipc11()

import common_setup

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-base", "issue77-v1"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 1,088 | lang: Python | avg_line_length: 24.325581 | max_line_length: 70 | alphanum_fraction: 0.668199
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-opt1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.configs
import downward.suites

# "ipc=False" skips portfolio configurations which we don't need to
# test here.
CONFIGS = downward.configs.default_configs_optimal(ipc=False, extended=True)

# pathmax is gone in this branch, remove it:
for key, value in list(CONFIGS.items()):
    for pos, arg in enumerate(value):
        if ", pathmax=false" in arg:
            value[pos] = arg.replace(", pathmax=false", "")

# selmax is currently disabled
del CONFIGS["astar_selmax_lmcut_lmcount"]

SUITE = downward.suites.suite_optimal_with_ipc11()

import common_setup

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-base", "issue77-v2"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 856 | lang: Python | avg_line_length: 24.205882 | max_line_length: 76 | alphanum_fraction: 0.703271
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v7-opt.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.suites

import common_setup
import configs

CONFIGS = configs.default_configs_optimal(ipc=False, extended=False)

print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_optimal_with_ipc11()

SCATTER_ATTRIBUTES = ["total_time"]

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v7-base", "issue77-v7"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=SCATTER_ATTRIBUTES, relative=True)

exp()
size: 594 | lang: Python | avg_line_length: 21.037036 | max_line_length: 71 | alphanum_fraction: 0.734007
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v6-sat-ehc.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.suites

import common_setup

CONFIGS = {
    "ehc_ff": [
        "--search", "ehc(ff())"],
    "ehc_add_pref": [
        "--heuristic", "hadd=add()",
        "--search", "ehc(hadd, preferred=[hadd])"],
    #"ehc_add_ff_pref": [
    #    "--search", "ehc(add(), preferred=[ff()],preferred_usage=RANK_PREFERRED_FIRST)"],
}

SUITE = downward.suites.suite_satisficing_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v6-base", "issue77-v6"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 669 | lang: Python | avg_line_length: 22.103448 | max_line_length: 90 | alphanum_fraction: 0.61435
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v4-opt.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.suites

import common_setup
import configs

CONFIGS = configs.default_configs_optimal(ipc=False, extended=False)

print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_optimal_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v3", "issue77-v4"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 512 | lang: Python | avg_line_length: 17.999999 | max_line_length: 68 | alphanum_fraction: 0.716797
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v5-sat-lazy.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.suites

import common_setup
import configs

CONFIGS = {}
INCLUDE = ("lazy", "lama")
EXCLUDE = ("lazy_greedy_add", "lazy_greedy_cea", "lazy_greedy_cg")
for key, value in configs.default_configs_satisficing(ipc=False, extended=True).items():
    if any(x in key for x in INCLUDE) and not any(x in key for x in EXCLUDE):
        CONFIGS[key] = value
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_satisficing_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-v5-base", "issue77-v5"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 753 | lang: Python | avg_line_length: 24.133333 | max_line_length: 88 | alphanum_fraction: 0.694555
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/issue77-v4-sat-lazy.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import downward.suites

import common_setup
import configs

CONFIGS = configs.default_configs_satisficing(ipc=False, extended=False)

DISABLED = [
]
for key, value in list(CONFIGS.items()):
    if not key.startswith("lazy"):
        del CONFIGS[key]
print(sorted(CONFIGS.keys()))
print(len(CONFIGS))

SUITE = downward.suites.suite_satisficing_with_ipc11()

exp = common_setup.IssueExperiment(
    search_revisions=["issue77-base", "issue77-v4"],
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
# exp.add_scatter_plot_step()

exp()
size: 638 | lang: Python | avg_line_length: 18.968749 | max_line_length: 72 | alphanum_fraction: 0.703762
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue77/configs.py
def configs_optimal_core(): return { # A* "astar_blind": [ "--search", "astar(blind)"], "astar_h2": [ "--search", "astar(hm(2))"], "astar_ipdb": [ "--search", "astar(ipdb)"], "astar_lmcount_lm_merged_rhw_hm": [ "--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], "astar_lmcut": [ "--search", "astar(lmcut)"], "astar_hmax": [ "--search", "astar(hmax)"], "astar_merge_and_shrink_bisim": [ "--search", "astar(merge_and_shrink(" "merge_strategy=merge_linear(variable_order=reverse_level)," "shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false," "group_by_h=true)))"], "astar_merge_and_shrink_greedy_bisim": [ "--search", "astar(merge_and_shrink(" "merge_strategy=merge_linear(variable_order=reverse_level)," "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1," "greedy=true,group_by_h=false)))"], "astar_merge_and_shrink_dfp_bisim": [ "--search", "astar(merge_and_shrink(merge_strategy=merge_dfp," "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1," "greedy=false,group_by_h=true)))"], #"astar_selmax_lmcut_lmcount": [ # "--search", # "astar(selmax([lmcut(),lmcount(lm_merged([lm_hm(m=1),lm_rhw()])," # "admissible=true)],training_set=1000),mpd=true)"], } def configs_satisficing_core(): return { # A* "astar_goalcount": [ "--search", "astar(goalcount)"], # eager greedy "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_add": [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_cg": [ "--heuristic", "h=cg()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_cea": [ "--heuristic", "h=cea()", "--search", "eager_greedy(h, preferred=h)"], # lazy greedy "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_add": [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_cg": [ "--heuristic", "h=cg()", "--search", "lazy_greedy(h, preferred=h)"], } def configs_optimal_ipc(): return { "seq_opt_merge_and_shrink": ["ipc", "seq-opt-merge-and-shrink"], "seq_opt_fdss_1": ["ipc", "seq-opt-fdss-1"], "seq_opt_fdss_2": ["ipc", "seq-opt-fdss-2"], } def configs_satisficing_ipc(): return { "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], "seq_sat_fdss_1": ["ipc", "seq-sat-fdss-1"], "seq_sat_fdss_2": ["ipc", "seq-sat-fdss-2"], } def configs_optimal_extended(): return { # A* "astar_lmcount_lm_merged_rhw_hm_no_order": [ "--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], } def configs_satisficing_extended(): return { # eager greedy "eager_greedy_alt_ff_cg": [ "--heuristic", "hff=ff()", "--heuristic", "hcg=cg()", "--search", "eager_greedy(hff,hcg,preferred=[hff,hcg])"], "eager_greedy_ff_no_pref": [ "--search", "eager_greedy(ff())"], # lazy greedy "lazy_greedy_alt_cea_cg": [ "--heuristic", "hcea=cea()", "--heuristic", "hcg=cg()", "--search", "lazy_greedy(hcea,hcg,preferred=[hcea,hcg])"], "lazy_greedy_ff_no_pref": [ "--search", "lazy_greedy(ff())"], "lazy_greedy_cea": [ "--heuristic", "h=cea()", "--search", "lazy_greedy(h, preferred=h)"], # lazy wA* "lazy_wa3_ff": [ "--heuristic", "h=ff()", "--search", "lazy_wastar(h,w=3,preferred=h)"], # eager wA* "eager_wa3_cg": [ "--heuristic", "h=cg()", "--search", "eager(single(sum([g(),weight(h,3)])),preferred=h)"], # ehc "ehc_ff": [ "--search", "ehc(ff())"], # iterated "iterated_wa_ff": [ "--heuristic", "h=ff()", "--search", "iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3)," 
"lazy_wastar(h,w=2), lazy_wastar(h,w=1)])"], # pareto open list "eager_pareto_ff": [ "--heuristic", "h=ff()", "--search", "eager(pareto([sum([g(), h]), h]), reopen_closed=true," "f_eval=sum([g(), h]))"], # bucket-based open list "eager_bucket_lmcut": [ "--heuristic", "h=lmcut()", "--search", "eager(single_buckets(h), reopen_closed=true)"], # LAMA's first iteration "lama_first": [ "--if-unit-cost", "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", "--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm])", "--if-non-unit-cost", "--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true," " lm_cost_type=one,cost_type=one))", "--heuristic", "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true," " lm_cost_type=plusone,cost_type=plusone))", "--search", "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1]," " cost_type=one,reopen_closed=false)", "--always"], } def default_configs_optimal(core=True, ipc=True, extended=False): configs = {} if core: configs.update(configs_optimal_core()) if ipc: configs.update(configs_optimal_ipc()) if extended: configs.update(configs_optimal_extended()) return configs def default_configs_satisficing(core=True, ipc=True, extended=False): configs = {} if core: configs.update(configs_satisficing_core()) if ipc: configs.update(configs_satisficing_ipc()) if extended: configs.update(configs_satisficing_extended()) return configs
size: 7,002 | lang: Python | avg_line_length: 30.403587 | max_line_length: 89 | alphanum_fraction: 0.457155
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue750/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
size: 14,171 | lang: Python | avg_line_length: 35.715026 | max_line_length: 79 | alphanum_fraction: 0.613859
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue750/v1-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue750-base", "issue750-v1"]
BUILD_OPTIONS = ["release32nolp"]
DRIVER_OPTIONS = ["--build", "release32nolp", "--search-time-limit", "1m"]
CONFIGS = [
    IssueConfig(
        "blind", ["--search", "astar(blind())"],
        build_options=BUILD_OPTIONS, driver_options=DRIVER_OPTIONS),
    IssueConfig(
        "lama-first", [],
        build_options=BUILD_OPTIONS,
        driver_options=DRIVER_OPTIONS + ["--alias", "lama-first"]),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attribute in ["total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
        )

exp.run_steps()
size: 1,784 | lang: Python | avg_line_length: 29.254237 | max_line_length: 93 | alphanum_fraction: 0.659193
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue750/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
size: 3,875 | lang: Python | avg_line_length: 35.566037 | max_line_length: 78 | alphanum_fraction: 0.59871
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue774/v2-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue774-v2-base", "issue774-v2"]
CONFIGS = [
    IssueConfig("blind", ["--search", "astar(blind())"]),
    IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]),
    IssueConfig(
        "lama-first", [],
        driver_options=["--alias", "lama-first"]),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attr in ["total_time", "search_time", "memory"]:
    for rev1, rev2 in [("v2-base", "v2")]:
        for config_nick in ["blind", "ehc_ff", "lama-first"]:
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attr],
                filter_algorithm=["issue774-%s-%s" % (rev1, config_nick),
                                  "issue774-%s-%s" % (rev2, config_nick)],
                get_category=lambda r1, r2: r1["domain"],
                ),
                outfile="issue774-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))

exp.run_steps()
size: 1,742 | lang: Python | avg_line_length: 30.124999 | max_line_length: 90 | alphanum_fraction: 0.637773
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue774/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( node == "login-infai.scicore.unibas.ch" or node.endswith(".cluster.bc2.ch") or "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). 
If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. 
For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
size: 14,266 | lang: Python | avg_line_length: 35.770618 | max_line_length: 79 | alphanum_fraction: 0.613767
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue774/v1-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue774-base", "issue774-v1"]
CONFIGS = [
    IssueConfig("blind", ["--search", "astar(blind())"]),
    IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]),
    IssueConfig(
        "lama-first", [],
        driver_options=["--alias", "lama-first"]),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attr in ["total_time", "search_time", "memory"]:
    for rev1, rev2 in [("base", "v1")]:
        for config_nick in ["blind", "ehc_ff", "lama-first"]:
            exp.add_report(RelativeScatterPlotReport(
                attributes=[attr],
                filter_algorithm=["issue774-%s-%s" % (rev1, config_nick),
                                  "issue774-%s-%s" % (rev2, config_nick)],
                get_category=lambda r1, r2: r1["domain"],
                ),
                outfile="issue774-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2))

exp.run_steps()
1,736
Python
30.017857
90
0.637673
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue774/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis, report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            # String exceptions are invalid; raise a proper exception instead.
            raise ValueError("not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
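RelativeScatterPlotReport above keeps only tasks with exactly one run per algorithm, then plots (val1, val2 / val1) and widens the y-limits so the plot is centered around 1. A small self-contained sketch of that transformation, mirroring _fill_categories; the value pairs are invented for illustration:

def relative_points(value_pairs):
    # Map (val1, val2) attribute pairs to (x, y) = (val1, val2 / val1).
    points = []
    ylim_bottom, ylim_top = 2.0, 0.5
    for val1, val2 in value_pairs:
        if val1 is None or val2 is None or val1 <= 0 or val2 <= 0:
            continue
        y = val2 / float(val1)
        points.append((val1, y))
        ylim_top = max(ylim_top, y)
        ylim_bottom = min(ylim_bottom, y)
    # center the y-range around 1, as the report above does
    if ylim_bottom < 1:
        ylim_top = max(ylim_top, 1 / ylim_bottom)
    if ylim_top > 1:
        ylim_bottom = min(ylim_bottom, 1 / ylim_top)
    return points, (ylim_bottom, ylim_top)

# Invented attribute values (e.g. total_time of two algorithms on two tasks):
print(relative_points([(10.0, 5.0), (2.0, 8.0)]))
# ([(10.0, 0.5), (2.0, 4.0)], (0.25, 4.0))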
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue602/v1-mco.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm from common_setup import IssueConfig, IssueExperiment SUITE_MCO14 = [ 'barman-mco14-strips', 'cavediving-mco14-adl', 'childsnack-mco14-strips', 'citycar-mco14-adl', 'floortile-mco14-strips', 'ged-mco14-strips', 'hiking-mco14-strips', 'maintenance-mco14-adl', 'openstacks-mco14-strips', 'parking-mco14-strips', 'tetris-mco14-strips', 'thoughtful-mco14-strips', 'transport-mco14-strips', 'visitall-mco14-strips', ] def main(revisions=None): suite = SUITE_MCO14 configs = [ IssueConfig("astar_goalcount", [ "--search", "astar(goalcount)"]), IssueConfig("eager_greedy_ff", [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_add", [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_cg", [ "--heuristic", "h=cg()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_cea", [ "--heuristic", "h=cea()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_ff", [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_add", [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_cg", [ "--heuristic", "h=cg()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("seq_sat_lama_2011", [], driver_options=[ "--alias", "seq-sat-lama-2011"]), IssueConfig("seq_sat_fdss_1", [], driver_options=[ "--alias", "seq-sat-fdss-1"]), IssueConfig("seq_sat_fdss_2", [], driver_options=[ "--alias", "seq-sat-fdss-2"]), ] exp = IssueExperiment( revisions=revisions, configs=configs, suite=suite, test_suite=[ #'cavediving-sat14-adl:testing01_easy.pddl', #'childsnack-sat14-strips:child-snack_pfile05.pddl', #'citycar-sat14-adl:p3-2-2-0-1.pddl', #'ged-sat14-strips:d-3-6.pddl', 'hiking-sat14-strips:ptesting-1-2-7.pddl', #'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl', #'tetris-sat14-strips:p020.pddl', #'thoughtful-sat14-strips:bootstrap-typed-01.pddl', #'transport-sat14-strips:p01.pddl', ], processes=4, email='[email protected]', ) exp.add_absolute_report_step() exp() main(revisions=['issue602-v1'])
2,976
Python
28.77
72
0.516465
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue602/v1-agl.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm from common_setup import IssueConfig, IssueExperiment SUITE_AGL14 = [ 'barman-agl14-strips', 'cavediving-agl14-adl', 'childsnack-agl14-strips', 'citycar-agl14-adl', 'floortile-agl14-strips', 'ged-agl14-strips', 'hiking-agl14-strips', 'maintenance-agl14-adl', 'openstacks-agl14-strips', 'parking-agl14-strips', 'tetris-agl14-strips', 'thoughtful-agl14-strips', 'transport-agl14-strips', 'visitall-agl14-strips', ] def main(revisions=None): suite = SUITE_AGL14 configs = [ IssueConfig("astar_goalcount", [ "--search", "astar(goalcount)"]), IssueConfig("eager_greedy_ff", [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_add", [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_cg", [ "--heuristic", "h=cg()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_cea", [ "--heuristic", "h=cea()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_ff", [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_add", [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_cg", [ "--heuristic", "h=cg()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("seq_sat_lama_2011", [], driver_options=[ "--alias", "seq-sat-lama-2011"]), IssueConfig("seq_sat_fdss_1", [], driver_options=[ "--alias", "seq-sat-fdss-1"]), IssueConfig("seq_sat_fdss_2", [], driver_options=[ "--alias", "seq-sat-fdss-2"]), ] exp = IssueExperiment( revisions=revisions, configs=configs, suite=suite, test_suite=[ #'cavediving-sat14-adl:testing01_easy.pddl', #'childsnack-sat14-strips:child-snack_pfile05.pddl', #'citycar-sat14-adl:p3-2-2-0-1.pddl', #'ged-sat14-strips:d-3-6.pddl', 'hiking-sat14-strips:ptesting-1-2-7.pddl', #'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl', #'tetris-sat14-strips:p020.pddl', #'thoughtful-sat14-strips:bootstrap-typed-01.pddl', #'transport-sat14-strips:p01.pddl', ], processes=4, email='[email protected]', ) exp.add_absolute_report_step() exp() main(revisions=['issue602-v1'])
2,976
Python
28.77
72
0.516465
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue602/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Wrapper for FastDownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions, configs, suite, grid_priority=None, path=None, test_suite=None, email=None, processes=1, **kwargs): """Create a DownwardExperiment with some convenience features. If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(os.path.join(repo, "benchmarks"), suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,533
Python
34.011173
83
0.596346
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue602/v1-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm from common_setup import IssueConfig, IssueExperiment SUITE_SAT14 = [ 'barman-sat14-strips', 'cavediving-sat14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'maintenance-sat14-adl', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def main(revisions=None): suite = SUITE_SAT14 configs = [ IssueConfig("astar_goalcount", [ "--search", "astar(goalcount)"]), IssueConfig("eager_greedy_ff", [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_add", [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_cg", [ "--heuristic", "h=cg()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("eager_greedy_cea", [ "--heuristic", "h=cea()", "--search", "eager_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_ff", [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_add", [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("lazy_greedy_cg", [ "--heuristic", "h=cg()", "--search", "lazy_greedy(h, preferred=h)"]), IssueConfig("seq_sat_lama_2011", [], driver_options=[ "--alias", "seq-sat-lama-2011"]), IssueConfig("seq_sat_fdss_1", [], driver_options=[ "--alias", "seq-sat-fdss-1"]), IssueConfig("seq_sat_fdss_2", [], driver_options=[ "--alias", "seq-sat-fdss-2"]), ] exp = IssueExperiment( revisions=revisions, configs=configs, suite=suite, test_suite=[ #'cavediving-sat14-adl:testing01_easy.pddl', #'childsnack-sat14-strips:child-snack_pfile05.pddl', #'citycar-sat14-adl:p3-2-2-0-1.pddl', #'ged-sat14-strips:d-3-6.pddl', 'hiking-sat14-strips:ptesting-1-2-7.pddl', #'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl', #'tetris-sat14-strips:p020.pddl', #'thoughtful-sat14-strips:bootstrap-typed-01.pddl', #'transport-sat14-strips:p01.pddl', ], processes=4, email='[email protected]', ) exp.add_absolute_report_step() exp() main(revisions=['issue602-v1'])
2,976
Python
28.77
72
0.516465
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue849/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,786
Python
36.435443
82
0.618355
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue849/ms-parser.py
#! /usr/bin/env python from lab.parser import Parser parser = Parser() parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) # TODO: replace above by below in future experiments parser.add_pattern('ms_construction_time', 'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float) parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) def check_ms_constructed(content, props): ms_construction_time = props.get('ms_construction_time') abstraction_constructed = False if ms_construction_time is not None: abstraction_constructed = True props['ms_abstraction_constructed'] = abstraction_constructed parser.add_function(check_ms_constructed) def check_atomic_fts_constructed(content, props): ms_atomic_construction_time = props.get('ms_atomic_construction_time') ms_atomic_fts_constructed = False if ms_atomic_construction_time is not None: ms_atomic_fts_constructed = True props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed parser.add_function(check_atomic_fts_constructed) def check_planner_exit_reason(content, props): ms_abstraction_constructed = props.get('ms_abstraction_constructed') error = props.get('error') if error != 'success' and error != 'timeout' and error != 'out-of-memory': print 'error: %s' % error return # Check whether merge-and-shrink computation or search ran out of # time or memory. ms_out_of_time = False ms_out_of_memory = False search_out_of_time = False search_out_of_memory = False if ms_abstraction_constructed == False: if error == 'timeout': ms_out_of_time = True elif error == 'out-of-memory': ms_out_of_memory = True elif ms_abstraction_constructed == True: if error == 'timeout': search_out_of_time = True elif error == 'out-of-memory': search_out_of_memory = True props['ms_out_of_time'] = ms_out_of_time props['ms_out_of_memory'] = ms_out_of_memory props['search_out_of_time'] = search_out_of_time props['search_out_of_memory'] = search_out_of_memory parser.add_function(check_planner_exit_reason) def check_perfect_heuristic(content, props): plan_length = props.get('plan_length') expansions = props.get('expansions') if plan_length != None: perfect_heuristic = False if plan_length + 1 == expansions: perfect_heuristic = True props['perfect_heuristic'] = perfect_heuristic parser.add_function(check_perfect_heuristic) parser.parse()
2,958
Python
39.534246
139
0.686613
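Each parser.add_pattern() call above registers a named regular expression that lab matches against the run's log and casts with the given type. As a rough illustration, the ms_construction_time pattern applied with the plain re module to a made-up log line (the log line is hypothetical; the pattern is the one registered above):

import re

log_line = "Merge-and-shrink algorithm runtime: 12.7s"  # hypothetical log output
match = re.search(r'Merge-and-shrink algorithm runtime: (.+)s', log_line)
if match:
    ms_construction_time = float(match.group(1))  # type=float, as in add_pattern()
    print(ms_construction_time)  # 12.7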
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue849/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue849-base", "issue849-v1"] BUILDS = ["release32"] CONFIG_NICKS = [ ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), ('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), ('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), ] CONFIGS = [ IssueConfig( config_nick, config, build_options=[build], driver_options=["--build", build]) for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('ms-parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ 
perfect_heuristic, ms_construction_time, ms_atomic_construction_time, ms_abstraction_constructed, ms_atomic_fts_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step(attributes=attributes) exp.run_steps()
4,069
Python
41.395833
451
0.755222
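The runtime attributes above (ms_construction_time, ms_atomic_construction_time) are aggregated with functions=[geometric_mean] from lab.reports; the geometric mean is exp(mean(log x)), which weights relative differences evenly instead of letting a few long runtimes dominate the average. A tiny sketch of the aggregation itself, with invented times (lab ships its own implementation; this only shows the formula):

import math

def geometric_mean(values):
    # exp(mean(log x)) == (x1 * x2 * ... * xn) ** (1.0 / n)
    return math.exp(sum(math.log(v) for v in values) / float(len(values)))

times = [0.1, 1.0, 10.0]      # invented ms_construction_time values in seconds
print(geometric_mean(times))  # ~1.0, while the arithmetic mean would be ~3.7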
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue849/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis, report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            # String exceptions are invalid; raise a proper exception instead.
            raise ValueError("not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v7.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(combinations=[("issue571-base", "issue571-base", "issue571-v6"), ("issue571-base", "issue571-base", "issue571-v7")])
193
Python
26.714282
121
0.658031
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v4.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(combinations=[("issue571-base", "issue571-base", "issue571-v3"), ("issue571-base", "issue571-base", "issue571-v4")])
193
Python
26.714282
121
0.658031
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v5.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(combinations=[("issue571-base", "issue571-base", "issue571-v4"), ("issue571-base", "issue571-base", "issue571-v5")])
193
Python
26.714282
121
0.658031
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/main.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm import common_setup def main(revisions=None, combinations=None): LIMITS = {"search_time": 1800} SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( search_revisions=revisions, combinations=combinations, configs=CONFIGS, suite=SUITE, limits=LIMITS, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_search_parser('ms-parser.py') # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', 
absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, actual_search_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step(attributes=attributes) exp()
4,450
Python
55.341771
273
0.723371
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v8.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(combinations=[("issue571-base", "issue571-base", "issue571-base-v2"), ("issue571-base", "issue571-base", "issue571-v7"), ("issue571-base", "issue571-base", "issue571-v8")])
249
Python
34.714281
177
0.662651
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/compare-base-base-v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport import common_setup REVS = ["issue571-base", "issue571-base-v2"] LIMITS = {"search_time": 1800} SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) 
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, actual_search_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) def filter_base1(run): config = run['config'] if config.startswith('issue571-base-'): return True return False def filter_base2(run): config = run['config'] if config.startswith('issue571-base-issue571-base-issue571-base-v2-'): return True return False exp.add_fetcher('data/issue571-v1-eval', filter=filter_base1) exp.add_fetcher('data/issue571-v2-eval', filter=filter_base2) # TODO: the following does not work, presumably because issue571-base # is not represented as issue571-base-issue571-base-issue571-base #exp.add_comparison_table_step(attributes=attributes) compared_configs=[] configs1 = ['issue571-base-%s' % conf for conf in CONFIGS] configs2 = ['issue571-base-issue571-base-issue571-base-v2-%s' % conf for conf in CONFIGS] for index in range(len(configs1)): compared_configs.append((configs1[index], configs2[index])) exp.add_report(CompareConfigsReport(attributes=attributes, compared_configs=compared_configs)) exp()
5,254
Python
50.519607
273
0.748192
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, email=None, processes=1, combinations=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. 
It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority, email=email) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, combinations, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. 
kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) if combinations: kwargs["combinations"].extend([ (Translator(repo, comb[0]), Preprocessor(repo, comb[1]), Planner(repo, comb[2])) for comb in combinations]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. return [_get_rev_nick(*combo) for combo in self.combinations] @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in self.get_supported_attributes( config_nick, attributes): make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
13259
Python
35.13079
88
0.608643
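The combination-building logic in the constructor of this common_setup variant is its core: when *search_revisions* is given, every run reuses the translator and preprocessor of the first revision and only the search component varies. A minimal, self-contained sketch of that expansion (plain strings stand in for the Translator/Preprocessor/Planner objects; the helper name is made up for illustration):

def expand_search_revisions(search_revisions):
    # All runs share the translator and preprocessor of the first revision;
    # only the search (planner) component varies.
    base_rev = search_revisions[0]
    return [(base_rev, base_rev, rev) for rev in search_revisions]

assert expand_search_revisions(["issue571-base", "issue571-v1"]) == [
    ("issue571-base", "issue571-base", "issue571-base"),
    ("issue571-base", "issue571-base", "issue571-v1"),
]

This mirrors the explicit triples passed to main(combinations=...) in the v3/v6 scripts of the same issue directory.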
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v6.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(combinations=[("issue571-base", "issue571-base", "issue571-v5"), ("issue571-base", "issue571-base", "issue571-v6")])
193
Python
26.714282
121
0.658031
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/ms-parser.py
#! /usr/bin/env python from lab.parser import Parser parser = Parser() parser.add_pattern('initial_h_value', 'initial h value: (\d+)', required=False, type=int) parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) def check_ms_constructed(content, props): ms_construction_time = props.get('ms_construction_time') abstraction_constructed = False if ms_construction_time is not None: abstraction_constructed = True props['ms_abstraction_constructed'] = abstraction_constructed parser.add_function(check_ms_constructed) def check_proved_unsolvability(content, props): proved_unsolvability = False if props['coverage'] == 0: for line in content.splitlines(): if line == 'Completely explored state space -- no solution!': proved_unsolvability = True break props['proved_unsolvability'] = proved_unsolvability parser.add_function(check_proved_unsolvability) def check_planner_exit_reason(content, props): ms_abstraction_constructed = props.get('ms_abstraction_constructed') error = props.get('error') if error != 'none' and error != 'timeout' and error != 'out-of-memory': print 'error: %s' % error return # Check whether merge-and-shrink computation or search ran out of # time or memory. ms_out_of_time = False ms_out_of_memory = False search_out_of_time = False search_out_of_memory = False if ms_abstraction_constructed == False: if error == 'timeout': ms_out_of_time = True elif error == 'out-of-memory': ms_out_of_memory = True elif ms_abstraction_constructed == True: if error == 'timeout': search_out_of_time = True elif error == 'out-of-memory': search_out_of_memory = True props['ms_out_of_time'] = ms_out_of_time props['ms_out_of_memory'] = ms_out_of_memory props['search_out_of_time'] = search_out_of_time props['search_out_of_memory'] = search_out_of_memory # Compute actual search time if ms_abstraction_constructed == True and props.get('search_time') is not None: difference = props.get('search_time') - props.get('ms_construction_time') props['actual_search_time'] = difference parser.add_function(check_planner_exit_reason) def check_perfect_heuristic(content, props): plan_length = props.get('plan_length') expansions = props.get('expansions') if plan_length != None: perfect_heuristic = False if plan_length + 1 == expansions: perfect_heuristic = True props['perfect_heuristic'] = perfect_heuristic parser.add_function(check_perfect_heuristic) parser.parse()
2879
Python
36.402597
128
0.663077
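The parser callbacks in ms-parser.py all follow the same contract: they receive the raw log content and a mutable props dictionary and write derived attributes back into it. A minimal sketch of that contract on made-up data (not part of the repository):

def check_perfect_heuristic(content, props):
    plan_length = props.get('plan_length')
    expansions = props.get('expansions')
    if plan_length is not None:
        # With a perfect heuristic, A* expands exactly plan_length + 1 states.
        props['perfect_heuristic'] = (plan_length + 1 == expansions)

props = {'plan_length': 10, 'expansions': 11}   # hypothetical run attributes
check_perfect_heuristic("", props)
assert props['perfect_heuristic'] is True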
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(revisions=["issue571-base", "issue571-v1"])
120
Python
16.285712
48
0.65
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(combinations=[("issue571-base", "issue571-base", "issue571-v2"), ("issue571-base", "issue571-base", "issue571-v3")])
193
Python
26.714282
121
0.658031
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue571/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main(combinations=[("issue571-base", "issue571-base", "issue571-base-v2"), ("issue571-base", "issue571-base", "issue571-v2")])
198
Python
27.428568
126
0.661616
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_running_on_cluster_login_node(): return platform.node() == "login20.cluster.bc2.ch" def can_publish(): return is_running_on_cluster_login_node() or not is_running_on_cluster() def publish(report_file): if can_publish(): subprocess.call(["publish", report_file]) else: print "publishing reports is not supported on this node" def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. 
:: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, name="make-absolute-report", outfile=outfile) self.add_step("publish-absolute-report", publish, outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def get_revision_pairs_and_files(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) yield (rev1, rev2, outfile) def make_comparison_tables(): for rev1, rev2, outfile in get_revision_pairs_and_files(): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) report(self.eval_dir, outfile) def publish_comparison_tables(): for _, _, outfile in get_revision_pairs_and_files(): publish(outfile) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step("publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14462
Python
35.24812
79
0.618725
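add_comparison_table_step in this common_setup pairs algorithms by revision: for every revision pair and every config nick it compares "<rev1>-<nick>" against "<rev2>-<nick>". A short sketch with made-up revision and config names (only the pairing logic mirrors the method above):

import itertools

revisions = ["issue743-base", "issue743-v1", "issue743-v2"]   # hypothetical
config_nicks = ["ipdb-900s"]                                   # hypothetical

for rev1, rev2 in itertools.combinations(revisions, 2):
    compared_configs = [
        ("%s-%s" % (rev1, nick), "%s-%s" % (rev2, nick), "Diff (%s)" % nick)
        for nick in config_nicks]
    print(compared_configs)
# e.g. [('issue743-base-ipdb-900s', 'issue743-v1-ipdb-900s', 'Diff (ipdb-900s)')]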
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/v2-vs-base.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = [] CONFIGS = [] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_algorithm( "base-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-base", ['--search', 'astar(ipdb(max_time=900))']) exp.add_algorithm( "v2-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-v2", ['--search', 'astar(ipdb(max_time=900, use_co_effect_goal_variables=false))']) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() #exp.add_comparison_table_step() exp.run_steps()
1218
Python
28.731707
82
0.7422
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue743-v1"] CONFIGS = [ IssueConfig( 'ipdb-goal-vars-{goal_vars}'.format(**locals()), ['--search', 'astar(ipdb(max_time=900, consider_co_effect_vars={goal_vars}))'.format(**locals())]) for goal_vars in [False, True] ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() #exp.add_comparison_table_step() exp.run_steps()
1130
Python
27.274999
106
0.738938
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3875
Python
35.566037
78
0.59871
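The essential computation in RelativeScatterPlotReport is the relative transformation of each problem's two values and the symmetric y-limits around 1. A standalone sketch of that arithmetic on assumed values (the helper name is invented; it only reproduces the logic of _fill_categories):

def relative_points(pairs):
    """Map (val1, val2) pairs to (x, y) with x = val1 and y = val2 / val1."""
    points = []
    ylim_bottom, ylim_top = 2.0, 0.5
    for val1, val2 in pairs:
        assert val1 > 0 and val2 > 0
        x, y = val1, val2 / float(val1)
        points.append((x, y))
        ylim_top = max(ylim_top, y)
        ylim_bottom = min(ylim_bottom, y)
    # Center the y-range around 1 so speedups and slowdowns look symmetric.
    if ylim_bottom < 1:
        ylim_top = max(ylim_top, 1 / ylim_bottom)
    if ylim_top > 1:
        ylim_bottom = min(ylim_bottom, 1 / ylim_top)
    return points, (ylim_bottom, ylim_top)

points, ylim = relative_points([(10.0, 5.0), (2.0, 8.0)])
assert points == [(10.0, 0.5), (2.0, 4.0)] and ylim == (0.25, 4.0)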
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue743-v2", "issue743-v3"] CONFIGS = [ IssueConfig('ipdb-900s', ['--search', 'astar(ipdb(max_time=900))']) ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.run_steps()
1000
Python
26.054053
71
0.759
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue743/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue743-v2"] CONFIGS = [ IssueConfig( 'ipdb-goal-vars-{goal_vars}'.format(**locals()), ['--search', 'astar(ipdb(max_time=900, use_co_effect_goal_variables={goal_vars}))'.format(**locals())]) for goal_vars in [False, True] ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() #exp.add_comparison_table_step() exp.run_steps()
1135
Python
27.399999
111
0.739207
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue733/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_running_on_cluster_login_node(): return platform.node() == "login20.cluster.bc2.ch" def can_publish(): return is_running_on_cluster_login_node() or not is_running_on_cluster() def publish(report_file): if can_publish(): subprocess.call(["publish", report_file]) else: print "publishing reports is not supported on this node" def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. 
:: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, name="make-absolute-report", outfile=outfile) self.add_step("publish-absolute-report", publish, outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def get_revision_pairs_and_files(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) yield (rev1, rev2, outfile) def make_comparison_tables(): for rev1, rev2, outfile in get_revision_pairs_and_files(): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) report(self.eval_dir, outfile) def publish_comparison_tables(): for _, _, outfile in get_revision_pairs_and_files(): publish(outfile) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step("publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14462
Python
35.24812
79
0.618725
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue733/v1.py
#! /usr/bin/env python2 # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab import tools from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue733-base", "issue733-v1"] PYTHONS = ["python2.7", "python3.5"] CONFIGS = [ IssueConfig( "{python}".format(**locals()), [], driver_options=["--translate"]) for python in PYTHONS ] SUITE = set(common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) BaselSlurmEnvironment.ENVIRONMENT_SETUP = ( 'module purge\n' 'module load Python/3.5.2-goolf-1.7.20\n' 'module load matplotlib/1.5.1-goolf-1.7.20-Python-3.5.2\n' 'PYTHONPATH="%s:$PYTHONPATH"' % tools.get_lab_path()) ENVIRONMENT = BaselSlurmEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) class PythonVersionExperiment(IssueExperiment): def _add_runs(self): IssueExperiment._add_runs(self) for run in self.runs: python = run.algo.name.split("-")[-1] command, kwargs = run.commands["fast-downward"] command = [python] + command run.commands["fast-downward"] = (command, kwargs) exp = PythonVersionExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) del exp.commands["parse-search"] exp.add_suite(BENCHMARKS_DIR, SUITE) attributes = ["translator_time_done", "translator_peak_memory"] exp.add_comparison_table_step(attributes=attributes) compared_configs = [ ("issue733-v1-python2.7", "issue733-v1-python3.5", "Diff")] exp.add_report( ComparativeReport(compared_configs, attributes=attributes), name="compare-python-versions") exp.run_steps()
2020
Python
30.092307
88
0.700495
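PythonVersionExperiment in the script above overrides _add_runs so that each run's "fast-downward" command is executed under the interpreter encoded in the algorithm nick. A sketch of that rewrite on a made-up command mapping (only the split-and-prepend logic is taken from the script):

commands = {"fast-downward": (["fast-downward.py", "--translate", "task.pddl"], {})}
algo_name = "issue733-v1-python3.5"               # hypothetical run

python = algo_name.split("-")[-1]                 # -> "python3.5"
command, kwargs = commands["fast-downward"]
commands["fast-downward"] = ([python] + command, kwargs)

assert commands["fast-downward"][0] == [
    "python3.5", "fast-downward.py", "--translate", "task.pddl"]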
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue733/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3875
Python
35.566037
78
0.59871
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue657/v4.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from common_setup import IssueConfig, IssueExperiment, is_test_run BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue657-v3", "issue657-v4"] CONFIGS = [ IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) for heuristic in [ "cegar(subtasks=[landmarks(),goals()],max_transitions=1000000)", "cegar(subtasks=[original()],max_transitions=1000000)"] ] SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) exp()
2233
Python
36.864406
74
0.700851
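Each of these experiment scripts registers one algorithm per (revision, config) pair, named via get_algo_nick from the accompanying common_setup module. A tiny sketch of the resulting names for the v4 experiment above (the surrounding data is illustrative):

def get_algo_nick(revision, config_nick):
    return "{revision}-{config_nick}".format(**locals())

revisions = ["issue657-v3", "issue657-v4"]
nicks = ["cegar(subtasks=[original()],max_transitions=1000000)"]
algo_names = [get_algo_nick(rev, nick) for rev in revisions for nick in nicks]
assert algo_names[1].startswith("issue657-v4-cegar(")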
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue657/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ revisions = revisions or [] configs = configs or [] path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." 
+ report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step( 'publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step( "publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step(step_name, make_scatter_plots))
11,532
Python
33.121302
79
0.596861
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue657/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from common_setup import IssueConfig, IssueExperiment, is_test_run BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue657-v1-base", "issue657-v1"] CONFIGS = [ IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) for heuristic in [ "cegar(max_states=10000)", "cegar(subtasks=[original()],max_states=10000)"] ] SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(attributes=["total_time"]) exp()
2,178
Python
35.932203
74
0.698347
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue657/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict
import logging

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows how a specific attribute differs
    between two configurations. The attribute value in config 1 is shown
    on the x-axis and the relation to the value in config 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise ValueError("output format 'tex' not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                logging.critical("Can only compare 2 configs")
            run1, run2 = runs
            assert (run1['config'] == self.configs[0] and
                    run2['config'] == self.configs[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.configs[0], val1)
            assert val2 > 0, (domain, problem, self.configs[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))

        if self.ylim_bottom == self.ylim_top:
            self.ylim_bottom *= 0.95
            self.ylim_top *= 1.05

        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlots use log-scaling on the x-axis by default.
        default_xscale = 'log'
        if self.attribute and self.attribute in self.LINEAR:
            default_xscale = 'linear'
        PlotReport._set_scales(self, xscale or default_xscale, 'log')
4,091
Python
35.864865
78
0.596676
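Editor's aside (illustration only; not one of the repository files listed here): RelativeScatterPlotReport above draws one point per task, using the first configuration's raw attribute value as x and the ratio of the second configuration's value to the first as y, so y = 1 marks "no change" and the log-scaled y-axis is centered around 1. A minimal sketch of that convention with made-up runtimes:

# Hypothetical helper mirroring the x = val1, y = val2 / float(val1) logic
# in relativescatter.py; both values must be positive, as the report asserts.
def relative_point(val1, val2):
    assert val1 > 0 and val2 > 0
    return val1, val2 / float(val1)

# A task solved in 10 s by the first configuration and 25 s by the second
# is plotted at (10.0, 2.5), i.e. above y = 1, indicating a slowdown.
print(relative_point(10.0, 25.0))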
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue657/v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from common_setup import IssueConfig, IssueExperiment, is_test_run BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue657-v2", "issue657-v3"] CONFIGS = [ IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) for heuristic in [ "cegar(subtasks=[landmarks(),goals()],max_transitions=1000000)", "cegar(subtasks=[original()],max_transitions=1000000)"] ] SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) exp()
2,233
Python
36.864406
74
0.700851
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue657/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from common_setup import IssueConfig, IssueExperiment, is_test_run, get_repo_base from relativescatter import RelativeScatterPlotReport BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REPO = get_repo_base() SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_algorithm( "01:issue657-v2-base:cegar", REPO, "issue657-v2-base", ["--search", "astar(cegar(max_states=10000,max_time=infinity))"]) exp.add_algorithm( "02:issue657-v2:cegar", REPO, "issue657-v2", ["--search", "astar(cegar(max_states=10000,max_time=infinity,max_transitions=infinity))"]) exp.add_absolute_report_step() exp.add_report(RelativeScatterPlotReport( filter_config=["01:issue657-v2-base:cegar", "02:issue657-v2:cegar"], attributes=["total_time"], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)), outfile="issue657-base-vs-v2.png") exp()
2,503
Python
36.939393
94
0.696364
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue544/sat-v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue544-base-v2", "issue544-v2"] LIMITS = {"search_time": 1800} SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = { "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
526
Python
17.172413
45
0.612167
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue544/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,755
Python
35.135977
79
0.610349
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue544/regression-v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- """ Before you can run the experiment you need to create duplicates of the two tasks we want to test: cd ../benchmarks/trucks-strips for i in {00..19}; do cp p16.pddl p16-$i.pddl; done for i in {00..19}; do cp domain_p16.pddl domain_p16-$i.pddl; done cd ../freecell for i in {00..19}; do cp probfreecell-11-5.pddl probfreecell-11-5-$i.pddl; done Don't forget to remove the duplicate tasks afterwards. Otherwise they will be included in subsequent experiments. """ import common_setup REVS = ["issue544-base", "issue544-v1"] LIMITS = {"search_time": 1800} CONFIGS = { "eager_greedy_add": [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], "lazy_greedy_add": [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], } TEST_RUN = False if TEST_RUN: SUITE = "gripper:prob01.pddl" PRIORITY = None # "None" means local experiment else: SUITE = (["trucks-strips:p16-%02d.pddl" % i for i in range(20)] + ["freecell:probfreecell-11-5-%02d.pddl" % i for i in range(20)]) PRIORITY = 0 # number means maia experiment exp = common_setup.IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
1,616
Python
23.134328
79
0.592203
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue544/sat-v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue544-base", "issue544-v1"] LIMITS = {"search_time": 1800} SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = { "eager_greedy_add": [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], "lazy_greedy_add": [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
904
Python
19.568181
45
0.529867
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue544/sat-v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue544-base-v2", "issue544-v2", "issue544-v3"] LIMITS = {"search_time": 1800} SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = { "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
541
Python
17.689655
57
0.613678
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue919/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, outfile=None, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = outfile or os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-' + os.path.basename(outfile), subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. Use *suffix* to denote a step name and filename suffix if you want to add multiple different comparison table steps. All *kwargs* except *suffix* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) suffix = kwargs.pop("suffix", "") if suffix: suffix = "-" + suffix def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare%s.%s" % ( self.name, rev1, rev2, suffix, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare%s.html" % (self.name, rev1, rev2, suffix)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables%s" % suffix, make_comparison_tables) self.add_step( "publish-comparison-tables%s" % suffix, publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
15,119
Python
36.61194
90
0.618096
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue919/translator_additional_parser.py
#!/usr/bin/env python import hashlib from lab.parser import Parser def add_hash_value(content, props): props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest() parser = Parser() parser.add_function(add_hash_value, file="output.sas") parser.parse()
273
Python
20.076922
77
0.739927
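Editor's aside (sketch only; not part of the experiment files): the parser above records the SHA-512 digest of each run's output.sas under the property 'translator_output_sas_hash', which the issue919 scripts later compare across revisions. Assuming you are inside a run directory, the same digest can be recomputed by hand to spot-check a run:

# Hypothetical cross-check; the path argument is an assumption, not part of
# the original parser, which receives the file contents from lab's Parser.
import hashlib

def sas_digest(path="output.sas"):
    with open(path, "rb") as f:
        return hashlib.sha512(f.read()).hexdigest()

print(sas_digest())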
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue919/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from collections import defaultdict import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab import tools from downward.reports.compare import ComparativeReport from downward.reports import PlanningReport import common_setup from common_setup import IssueConfig, IssueExperiment EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue919-base", "issue919-v1"] CONFIGS = [ IssueConfig( "translate-only", [], driver_options=["--translate"]) ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) # This was generated by running "./suites.py all" in the benchmarks # repository. SUITE = [ 'agricola-opt18-strips', 'agricola-sat18-strips', 'airport', 'airport-adl', 'assembly', 'barman-mco14-strips', 'barman-opt11-strips', 'barman-opt14-strips', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-opt18-adl', 'caldera-sat18-adl', 'caldera-split-opt18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-opt14-strips', 'childsnack-sat14-strips', 'citycar-opt14-adl', 'citycar-sat14-adl', 'data-network-opt18-strips', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-opt11-strips', 'floortile-opt14-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-opt14-strips', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-agl14-strips', 'hiking-opt14-strips', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-opt14-adl', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'no-mprime', 'no-mystery', 'nomystery-opt11-strips', 'nomystery-sat11-strips', 'nurikabe-opt18-adl', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-agl14-strips', 'openstacks-opt08-adl', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-opt18-strips', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-opt18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parcprinter-sat11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pegsol-sat11-strips', 'petri-net-alignment-opt18-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-opt18-adl', 'settlers-sat18-adl', 'snake-opt18-strips', 'snake-sat18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-opt18-strips', 'spider-sat18-strips', 'storage', 'termes-opt18-strips', 'termes-sat18-strips', 'tetris-opt14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'thoughtful-sat14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'transport-sat08-strips', 
'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel', ] if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser("translator_additional_parser.py") del exp.commands['remove-output-sas'] class TranslatorDiffReport(PlanningReport): def get_cell(self, run): return ";".join(run.get(attr) for attr in self.attributes) def get_text(self): lines = [] for runs in self.problem_runs.values(): hashes = set([r.get("translator_output_sas_hash") for r in runs]) if len(hashes) > 1 or None in hashes: lines.append(";".join([self.get_cell(r) for r in runs])) return "\n".join(lines) class SameValueFilters(object): """Ignore runs for a task where all algorithms have the same value.""" def __init__(self, attribute): self._attribute = attribute self._tasks_to_values = defaultdict(list) def _get_task(self, run): return (run['domain'], run['problem']) def store_values(self, run): value = run.get(self._attribute) self._tasks_to_values[self._get_task(run)].append(value) # Don't filter this run, yet. return True def filter_tasks_with_equal_values(self, run): values = self._tasks_to_values[self._get_task(run)] return len(set(values)) != 1 exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"] # exp.add_absolute_report_step( # outfile=os.path.join(exp.eval_dir, "{EXPNAME}.html".format(**locals())), # attributes=ATTRIBUTES) exp.add_comparison_table_step( attributes=ATTRIBUTES) same_value_filters = SameValueFilters("translator_output_sas_hash") # exp.add_absolute_report_step( # outfile=os.path.join(exp.eval_dir, "{EXPNAME}-filtered.html".format(**locals())), # attributes=ATTRIBUTES, # filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) exp.add_comparison_table_step( suffix="filtered", attributes=ATTRIBUTES, filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) exp.add_report(TranslatorDiffReport( attributes=["domain", "problem", "algorithm", "run_dir"] ), outfile="different_output_sas.csv" ) exp.run_steps()
7,065
Python
27.37751
97
0.659731
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue919/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise ValueError("output format 'tex' not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))

        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue344/common_setup.py
# -*- coding: utf-8 -*- import os.path import platform from lab.environments import MaiaEnvironment from lab.steps import Step from downward.checkouts import Translator, Preprocessor, Planner from downward.experiments import DownwardExperiment from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the filename of the main script, e.g. "/ham/spam/eggs.py" => "eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Found by searching upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found.""" path = os.path.abspath(get_script_dir()) while True: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) def build_combos_with_names(repo, combinations, revisions, search_revisions): """Build (combos, combo_names) lists for the given planner revisions. combos and combo_names are parallel lists, where combos contains (Translator, Preprocessor, Search) triples and combo_names are the names for the respective combinations that lab uses internally. See MyExperiment.__init__ for documentation of the parameters combinations, revisions and search_revisions.""" combos = [] names = [] def build(*rev_triple): combo, name = build_combo_with_name(repo, *rev_triple) combos.append(combo) names.append(name) for triple in combinations or []: build(triple) for rev in revisions or []: build(rev, rev, rev) for rev in search_revisions or []: build(search_revisions[0], search_revisions[0], rev) return combos, names def build_combo_with_name(repo, trans_rev, preprocess_rev, search_rev): """Generate a tuple (combination, name) for the given revisions. combination is a (Translator, Preprocessor, Search) tuple and name is the name that lab uses to refer to it.""" # TODO: In the future, it would be nice if we didn't need the name # information any more, as it is somewhat of an implementation # detail. combo = (Translator(repo, trans_rev), Preprocessor(repo, preprocess_rev), Planner(repo, search_rev)) if trans_rev == preprocess_rev == search_rev: name = str(search_rev) else: name = "%s-%s-%s" % (trans_rev, preprocess_rev, search_rev) return combo, name def is_on_grid(): """Returns True if the current machine is on the maia grid. Implemented by checking if host name ends with ".cluster". 
""" return platform.node().endswith(".cluster") class MyExperiment(DownwardExperiment): DEFAULT_TEST_SUITE = [ "zenotravel:pfile1", "zenotravel:pfile2", ] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "total_time", "search_time", "memory", "expansions_until_last_jump", ] """Wrapper for DownwardExperiment with a few convenience features.""" def __init__(self, configs=None, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, combinations=None, suite=None, do_test_run="auto", test_suite=DEFAULT_TEST_SUITE, **kwargs): """Create a DownwardExperiment with some convenience features. If "configs" is specified, it should be a dict of {nick: cmdline} pairs that sets the planner configurations to test. If "grid_priority" is specified and no environment is specifically requested in **kwargs, use the maia environment with the specified priority. If "path" is not specified, the experiment data path is derived automatically from the main script's filename. If "repo" is not specified, the repository base is derived automatically from the main script's path. If "combinations" is specified, it should be a non-empty list of revision triples of the form (translator_rev, preprocessor_rev, search_rev). If "revisions" is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. If "search_revisions" is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All experiments use the translator and preprocessor component of the first revision. It is possible to specify a mixture of"combinations", "revisions" and "search_revisions". If "suite" is specified, it should specify a problem suite. If "do_test_run" is true, the "grid_priority" and "environment" (from the base class) arguments are ignored and a local experiment with default arguments is run instead. In this case, the "suite" argument is replaced by the "test_suite" argument. If "do_test_run" is the string "auto" (the default), then do_test_run is set to False when run on a grid machine and to True otherwise. A grid machine is identified as one whose node name ends with ".cluster". """ if do_test_run == "auto": do_test_run = not is_on_grid() if do_test_run: # In a test run, overwrite certain arguments. 
grid_priority = None kwargs.pop("environment", None) suite = test_suite if grid_priority is not None and "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() combinations, self._combination_names = build_combos_with_names( repo=repo, combinations=combinations, revisions=revisions, search_revisions=search_revisions) kwargs["combinations"] = combinations DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) if configs is not None: for nick, config in configs.items(): self.add_config(nick, config) if suite is not None: self.add_suite(suite) self._report_prefix = get_experiment_name() def add_comparison_table_step(self, attributes=None): revisions = self._combination_names if len(revisions) != 2: # TODO: Should generalize this by offering a general # grouping function and then comparing any pair of # settings in the same group. raise NotImplementedError("need two revisions") if attributes is None: attributes = self.DEFAULT_TABLE_ATTRIBUTES report = CompareRevisionsReport(*revisions, attributes=attributes) self.add_report(report, outfile="%s-compare.html" % self._report_prefix) def add_scatter_plot_step(self, attributes=None): if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES revisions = self._combination_names if len(revisions) != 2: # TODO: Should generalize this by offering a general # grouping function and then comparing any pair of # settings in the same group. raise NotImplementedError("need two revisions") scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plots(): configs = [conf[0] for conf in self.configs] for nick in configs: config_before = "%s-%s" % (revisions[0], nick) config_after = "%s-%s" % (revisions[1], nick) for attribute in attributes: name = "%s-%s-%s" % (self._report_prefix, attribute, nick) report = ScatterPlotReport( filter_config=[config_before, config_after], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots))
9,626
Python
34.788104
80
0.628922
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue344/exp14.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import downward.configs import downward.suites import common_setup exp = common_setup.MyExperiment( grid_priority=0, search_revisions=["issue344-base", "issue344-v5"], configs=downward.configs.default_configs_optimal(), suite=downward.suites.suite_optimal_with_ipc11(), do_test_run="auto" ) exp.add_comparison_table_step() exp.add_scatter_plot_step() exp()
434
Python
18.772726
55
0.700461
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue601/issue601-v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm import common_setup def main(revisions=None): SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( revisions=revisions, configs=CONFIGS, suite=SUITE, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) 
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step() exp() main(revisions=["issue601-v1", "issue601-v2"])
4364
Python
55.688311
267
0.711274
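The three dictionaries in issue601-v2.py are merged into one CONFIGS mapping, and common_setup pairs every config nick with every revision. A minimal standalone sketch of the resulting algorithm names (not a file from the repository; the '...' placeholders stand in for the full merge_and_shrink strings above):

# Sketch: how B_/G_/F_CONFIGS combine and which algorithm names result.
B_CONFIGS = {'rl-b50k': ['--search', '...']}
G_CONFIGS = {'rl-ginf': ['--search', '...']}
F_CONFIGS = {'rl-f50k': ['--search', '...']}

CONFIGS = dict(B_CONFIGS)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)

for rev in ["issue601-v1", "issue601-v2"]:
    for nick in sorted(CONFIGS):
        print("%s-%s" % (rev, nick))   # e.g. issue601-v2-rl-b50k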
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue601/issue601-base.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm import common_setup def main(revisions=None): SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( revisions=revisions, configs=CONFIGS, suite=SUITE, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) 
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp() main(revisions=["issue601-base"])
4404
Python
57.733333
277
0.715713
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue601/issue601-v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm import common_setup def main(revisions=None): SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( revisions=revisions, configs=CONFIGS, suite=SUITE, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) 
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step() exp() main(revisions=["issue601-v2", "issue601-v3"])
4364
Python
55.688311
267
0.711274
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue601/issue601-v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm from downward.reports.compare import CompareConfigsReport import common_setup def main(revisions=None): SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( revisions=revisions, configs=CONFIGS, suite=SUITE, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = 
Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_fetcher('data/issue601-base-eval') exp.add_report(CompareConfigsReport(compared_configs=[ ('issue601-base-rl-b50k', 'issue601-v1-rl-b50k'), ('issue601-base-cggl-b50k', 'issue601-v1-cggl-b50k'), ('issue601-base-dfp-b50k', 'issue601-v1-dfp-b50k'), ('issue601-base-rl-ginf', 'issue601-v1-rl-ginf'), ('issue601-base-cggl-ginf', 'issue601-v1-cggl-ginf'), ('issue601-base-dfp-ginf', 'issue601-v1-dfp-ginf'), ('issue601-base-rl-f50k', 'issue601-v1-rl-f50k'), ('issue601-base-cggl-f50k', 'issue601-v1-cggl-f50k'), ('issue601-base-dfp-f50k', 'issue601-v1-dfp-f50k'), ],attributes=attributes)) exp() main(revisions=["issue601-v1"])
5047
Python
55.7191
267
0.702397
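issue601-v1.py differs from the other scripts in this directory in that it fetches the previously evaluated issue601-base experiment (add_fetcher) and compares it against v1 with an explicit CompareConfigsReport. A standalone sketch (not from the repository) that generates the same nine config pairs as the hand-written list above:

nicks = ['rl-b50k', 'cggl-b50k', 'dfp-b50k',
         'rl-ginf', 'cggl-ginf', 'dfp-ginf',
         'rl-f50k', 'cggl-f50k', 'dfp-f50k']
compared_configs = [('issue601-base-%s' % nick, 'issue601-v1-%s' % nick)
                    for nick in nicks]
for pair in compared_configs:
    print(pair)   # e.g. ('issue601-base-rl-b50k', 'issue601-v1-rl-b50k')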
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue601/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments.fast_downward_experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(FastDownwardExperiment): """Wrapper for FastDownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, revisions, suite, build_options=None, driver_options=None, grid_priority=None, test_suite=None, email=None, processes=1, **kwargs): """Create an FastDownwardExperiment with some convenience features. All configs will be run on all revisions. Inherited options *path*, *environment* and *cache_dir* from FastDownwardExperiment are not supported and will be automatically set. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. nick will automatically get the revision prepended, e.g. 'issue123-base-<nick>':: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *revisions* must be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. 
:: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): environment = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: environment = MaiaEnvironment(priority=grid_priority, email=email) FastDownwardExperiment.__init__(self, environment=environment, **kwargs) # Automatically deduce the downward repository from the file repo = get_repo_base() self.algorithm_nicks = [] self.revisions = revisions for nick, cmdline in configs.items(): for rev in revisions: algo_nick = '%s-%s' % (rev, nick) self.add_algorithm(algo_nick, repo, rev, cmdline, build_options, driver_options) self.algorithm_nicks.append(algo_nick) benchmarks_dir = os.path.join(repo, 'benchmarks') self.add_suite(benchmarks_dir, suite) self.search_parsers = [] def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) # oufile is of the form <rev1>-<rev2>-...-<revn>.<format> outfile = '' for rev in self.revisions: outfile += rev outfile += '-' outfile = outfile[:len(outfile)-1] outfile += '.' outfile += report.output_format outfile = os.path.join(self.eval_dir, outfile) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revisions, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revisions, 2): outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) subprocess.call(['publish', outfile]) self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) # TODO: this is copied from the old common_setup, but not tested # with the new FastDownwardExperiment class! def add_scatter_plot_step(self, attributes=None): print 'This has not been tested with the new FastDownwardExperiment class!' exit(0) """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
11089
Python
35.843854
93
0.59816
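add_comparison_table_step in this common_setup.py builds one comparison report per pair of revisions via itertools.combinations. A standalone sketch (not part of the repository) of the report file names this yields for a three-revision experiment:

import itertools

revisions = ["issue601-base", "issue601-v1", "issue601-v2"]
for rev1, rev2 in itertools.combinations(revisions, 2):
    # One comparison table per revision pair, as in make_comparison_tables().
    print("%s-%s-compare.html" % (rev1, rev2))
# issue601-base-issue601-v1-compare.html
# issue601-base-issue601-v2-compare.html
# issue601-v1-issue601-v2-compare.html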
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue601/ms-parser.py
#! /usr/bin/env python from lab.parser import Parser parser = Parser() parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[.+s\]', required=False, type=float) parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) def check_ms_constructed(content, props): ms_construction_time = props.get('ms_construction_time') abstraction_constructed = False if ms_construction_time is not None: abstraction_constructed = True props['ms_abstraction_constructed'] = abstraction_constructed parser.add_function(check_ms_constructed) def check_proved_unsolvability(content, props): proved_unsolvability = False if props['coverage'] == 0: for line in content.splitlines(): if line == 'Completely explored state space -- no solution!': proved_unsolvability = True break props['proved_unsolvability'] = proved_unsolvability parser.add_function(check_proved_unsolvability) def check_planner_exit_reason(content, props): ms_abstraction_constructed = props.get('ms_abstraction_constructed') error = props.get('error') if error != 'none' and error != 'timeout' and error != 'out-of-memory': print 'error: %s' % error return # Check whether merge-and-shrink computation or search ran out of # time or memory. ms_out_of_time = False ms_out_of_memory = False search_out_of_time = False search_out_of_memory = False if ms_abstraction_constructed == False: if error == 'timeout': ms_out_of_time = True elif error == 'out-of-memory': ms_out_of_memory = True elif ms_abstraction_constructed == True: if error == 'timeout': search_out_of_time = True elif error == 'out-of-memory': search_out_of_memory = True props['ms_out_of_time'] = ms_out_of_time props['ms_out_of_memory'] = ms_out_of_memory props['search_out_of_time'] = search_out_of_time props['search_out_of_memory'] = search_out_of_memory parser.add_function(check_planner_exit_reason) def check_perfect_heuristic(content, props): plan_length = props.get('plan_length') expansions = props.get('expansions') if plan_length != None: perfect_heuristic = False if plan_length + 1 == expansions: perfect_heuristic = True props['perfect_heuristic'] = perfect_heuristic parser.add_function(check_perfect_heuristic) parser.parse()
2646
Python
35.763888
128
0.661376
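The parser above extracts attribute values from planner logs with lab's regex patterns. A standalone sketch of what the ms_construction_time pattern matches (the log line is constructed from the pattern itself, not taken from an actual run):

import re

# The same regex that parser.add_pattern('ms_construction_time', ...) uses above.
pattern = r'Done initializing merge-and-shrink heuristic \[(.+)s\]'
line = 'Done initializing merge-and-shrink heuristic [12.34s]'

match = re.search(pattern, line)
if match:
    print(float(match.group(1)))   # 12.34, stored as ms_construction_time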
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue198/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14153
Python
35.955613
82
0.615205
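Algorithm names in this setup are always '<revision>-<config nick>', as produced by get_algo_nick. A standalone sketch using the revisions and nicks from v2-opt.py below:

def get_algo_nick(revision, config_nick):
    # Same naming scheme as in the module above.
    return "{revision}-{config_nick}".format(**locals())

print(get_algo_nick("issue198-v2-base", "astar_blind"))   # issue198-v2-base-astar_blind
print(get_algo_nick("issue198-v2", "seq-opt-bjolp"))      # issue198-v2-seq-opt-bjolp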
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue198/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3875
Python
35.566037
78
0.59871
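RelativeScatterPlotReport plots, per task, the raw value of the first algorithm on the x-axis and the ratio second/first on the y-axis, then widens the y-limits so the plot is centered around 1. A standalone numeric sketch of the point computed in _fill_categories (the two values are made up):

val1, val2 = 20.0, 5.0            # e.g. total_time of algorithm 1 and 2 on one task
x = val1                          # raw value of algorithm 1
y = val2 / float(val1)            # 0.25: algorithm 2 needs a quarter of the time

# The report then stretches the y-limits so they are symmetric around 1:
ylim_bottom, ylim_top = min(2, y), max(0.5, y)
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))
print(x, y, ylim_bottom, ylim_top)   # 20.0 0.25 0.25 4.0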
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue198/v2-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue198-v2-base", "issue198-v2"] CONFIGS = [ IssueConfig('astar_blind', ['--search', 'astar(blind)']), IssueConfig('seq-opt-bjolp', [], driver_options=["--alias", "seq-opt-bjolp"]), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_parse_again_step() #exp.add_absolute_report_step() exp.add_comparison_table_step() for attribute in ["total_time"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) ) exp.run_steps()
1821
Python
28.868852
93
0.688083
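The report loop at the end of v2-opt.py writes one relative scatter plot per attribute and config. A standalone sketch of the outfile names it produces (assumption: exp.name resolves to issue198-v2-opt, derived from the script's directory and file name):

REVISIONS = ["issue198-v2-base", "issue198-v2"]
exp_name = "issue198-v2-opt"   # assumed value of exp.name for this script

for attribute in ["total_time"]:
    for nick in ["astar_blind", "seq-opt-bjolp"]:
        print("{}-{}-{}-{}-{}.png".format(exp_name, attribute, nick, *REVISIONS))
# issue198-v2-opt-total_time-astar_blind-issue198-v2-base-issue198-v2.png
# issue198-v2-opt-total_time-seq-opt-bjolp-issue198-v2-base-issue198-v2.png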
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue666/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=None, **kwargs): """ If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(benchmarks_dir, suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12496
Python
33.907821
83
0.59435
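For portfolio configurations, get_supported_attributes in this common_setup.py filters the requested attributes down to PORTFOLIO_ATTRIBUTES before scatter plots are made. A standalone sketch (the attribute list and config nicks are arbitrary examples):

PORTFOLIO_ATTRIBUTES = ["cost", "coverage", "error", "plan_length", "run_dir"]

def get_supported_attributes(config_nick, attributes):
    # Mirrors IssueExperiment.get_supported_attributes above.
    if "fdss" in config_nick:   # portfolios are recognized by their nick
        return [attr for attr in attributes if attr in PORTFOLIO_ATTRIBUTES]
    return attributes

attrs = ["coverage", "expansions", "total_time", "cost"]
print(get_supported_attributes("seq-sat-fdss-1", attrs))   # ['coverage', 'cost']
print(get_supported_attributes("astar_blind", attrs))      # all four attributes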
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue666/suites.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import argparse import textwrap HELP = "Convert suite name to list of domains or tasks." def suite_alternative_formulations(): return ['airport-adl', 'no-mprime', 'no-mystery'] def suite_ipc98_to_ipc04_adl(): return [ 'assembly', 'miconic-fulladl', 'miconic-simpleadl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'schedule', ] def suite_ipc98_to_ipc04_strips(): return [ 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', 'satellite', 'zenotravel', ] def suite_ipc98_to_ipc04(): # All IPC1-4 domains, including the trivial Movie. return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) def suite_ipc06_adl(): return [ 'openstacks', 'pathways', 'trucks', ] def suite_ipc06_strips_compilations(): return [ 'openstacks-strips', 'pathways-noneg', 'trucks-strips', ] def suite_ipc06_strips(): return [ 'pipesworld-tankage', 'rovers', 'storage', 'tpp', ] def suite_ipc06(): return sorted(suite_ipc06_adl() + suite_ipc06_strips()) def suite_ipc08_common_strips(): return [ 'parcprinter-08-strips', 'pegsol-08-strips', 'scanalyzer-08-strips', ] def suite_ipc08_opt_adl(): return ['openstacks-opt08-adl'] def suite_ipc08_opt_strips(): return sorted(suite_ipc08_common_strips() + [ 'elevators-opt08-strips', 'openstacks-opt08-strips', 'sokoban-opt08-strips', 'transport-opt08-strips', 'woodworking-opt08-strips', ]) def suite_ipc08_opt(): return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) def suite_ipc08_sat_adl(): return ['openstacks-sat08-adl'] def suite_ipc08_sat_strips(): return sorted(suite_ipc08_common_strips() + [ # Note: cyber-security is missing. 
'elevators-sat08-strips', 'openstacks-sat08-strips', 'sokoban-sat08-strips', 'transport-sat08-strips', 'woodworking-sat08-strips', ]) def suite_ipc08_sat(): return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) def suite_ipc08(): return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) def suite_ipc11_opt(): return [ 'barman-opt11-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'nomystery-opt11-strips', 'openstacks-opt11-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'pegsol-opt11-strips', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'tidybot-opt11-strips', 'transport-opt11-strips', 'visitall-opt11-strips', 'woodworking-opt11-strips', ] def suite_ipc11_sat(): return [ 'barman-sat11-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'nomystery-sat11-strips', 'openstacks-sat11-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'pegsol-sat11-strips', 'scanalyzer-sat11-strips', 'sokoban-sat11-strips', 'tidybot-sat11-strips', 'transport-sat11-strips', 'visitall-sat11-strips', 'woodworking-sat11-strips', ] def suite_ipc11(): return sorted(suite_ipc11_opt() + suite_ipc11_sat()) def suite_ipc14_agl_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_agl_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-agl14-strips', 'openstacks-agl14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_agl(): return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) def suite_ipc14_mco_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_mco_strips(): return [ 'barman-mco14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_mco(): return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) def suite_ipc14_opt_adl(): return [ 'cavediving-14-adl', 'citycar-opt14-adl', 'maintenance-opt14-adl', ] def suite_ipc14_opt_strips(): return [ 'barman-opt14-strips', 'childsnack-opt14-strips', 'floortile-opt14-strips', 'ged-opt14-strips', 'hiking-opt14-strips', 'openstacks-opt14-strips', 'parking-opt14-strips', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips', 'visitall-opt14-strips', ] def suite_ipc14_opt(): return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) def suite_ipc14_sat_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_sat_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_sat(): return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) def suite_ipc14(): return sorted(set( suite_ipc14_agl() + suite_ipc14_mco() + suite_ipc14_opt() + suite_ipc14_sat())) def suite_unsolvable(): return sorted( ['mystery:prob%02d.pddl' % index for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) def suite_optimal_adl(): return sorted( suite_ipc98_to_ipc04_adl() + 
suite_ipc06_adl() + suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) def suite_optimal_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + suite_ipc11_opt() + suite_ipc14_opt_strips()) def suite_optimal(): return sorted(suite_optimal_adl() + suite_optimal_strips()) def suite_satisficing_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) def suite_satisficing_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + suite_ipc11_sat() + suite_ipc14_sat_strips()) def suite_satisficing(): return sorted(suite_satisficing_adl() + suite_satisficing_strips()) def suite_all(): return sorted( suite_ipc98_to_ipc04() + suite_ipc06() + suite_ipc06_strips_compilations() + suite_ipc08() + suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("suite", help="suite name") return parser.parse_args() def main(): prefix = "suite_" suite_names = [ name[len(prefix):] for name in sorted(globals().keys()) if name.startswith(prefix)] parser = argparse.ArgumentParser(description=HELP) parser.add_argument("suite", choices=suite_names, help="suite name") parser.add_argument( "--width", default=72, type=int, help="output line width (default: %(default)s). Use 1 for single " "column.") args = parser.parse_args() suite_func = globals()[prefix + args.suite] print(textwrap.fill( str(suite_func()), width=args.width, break_long_words=False, break_on_hyphens=False)) if __name__ == "__main__": main()
8,551
Python
23.364672
77
0.595954
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue666/ms-parser.py
#! /usr/bin/env python from lab.parser import Parser parser = Parser() parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) def check_ms_constructed(content, props): ms_construction_time = props.get('ms_construction_time') abstraction_constructed = False if ms_construction_time is not None: abstraction_constructed = True props['ms_abstraction_constructed'] = abstraction_constructed parser.add_function(check_ms_constructed) def check_planner_exit_reason(content, props): ms_abstraction_constructed = props.get('ms_abstraction_constructed') error = props.get('error') if error not in ('none', 'timeout', 'out-of-memory'): print('error: %s' % error) return # Check whether merge-and-shrink computation or search ran out of # time or memory. ms_out_of_time = False ms_out_of_memory = False search_out_of_time = False search_out_of_memory = False if ms_abstraction_constructed == False: if error == 'timeout': ms_out_of_time = True elif error == 'out-of-memory': ms_out_of_memory = True elif ms_abstraction_constructed == True: if error == 'timeout': search_out_of_time = True elif error == 'out-of-memory': search_out_of_memory = True props['ms_out_of_time'] = ms_out_of_time props['ms_out_of_memory'] = ms_out_of_memory props['search_out_of_time'] = search_out_of_time props['search_out_of_memory'] = search_out_of_memory parser.add_function(check_planner_exit_reason) def check_perfect_heuristic(content, props): plan_length = props.get('plan_length') expansions = props.get('expansions') if plan_length is not None: perfect_heuristic = False if plan_length + 1 == expansions: perfect_heuristic = True props['perfect_heuristic'] = perfect_heuristic parser.add_function(check_perfect_heuristic) def check_proved_unsolvability(content, props): proved_unsolvability = False if props['coverage'] == 0: for line in content.splitlines(): if line == 'Completely explored state space -- no solution!': proved_unsolvability = True break props['proved_unsolvability'] = proved_unsolvability parser.add_function(check_proved_unsolvability) parser.parse()
2,784
Python
37.150684
135
0.665589
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue666/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os import suites from lab.reports import Attribute, gm from common_setup import IssueConfig, IssueExperiment try: from relativescatter import RelativeScatterPlotReport matplotlib = True except ImportError: print('matplotlib not available, scatter plots not available') matplotlib = False def main(revisions=None): benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') suite=suites.suite_optimal_strips() configs = { IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), } exp = IssueExperiment( benchmarks_dir=benchmarks_dir, suite=suite, revisions=revisions, configs=configs, test_suite=['depot:p01.pddl'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step() #if matplotlib: #for attribute in ["memory", "total_time"]: #for config in configs: #exp.add_report( #RelativeScatterPlotReport( #attributes=[attribute], #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], #get_category=lambda run1, run2: run1.get("domain"), #), #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) #) exp() main(revisions=['issue666-base', 'issue666-v1'])
5,710
Python
61.758241
355
0.70683
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue666/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in range(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter(axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute compares between two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
Python
35.654205
84
0.597042
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/v4.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue908-base", "issue908-v4"] CONFIGS = [ IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') attributes=exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend([ Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), ]) #exp.add_absolute_report_step() exp.add_comparison_table_step(attributes=attributes) exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) exp.run_steps()
2,121
Python
32.156249
164
0.751061
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/v5.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue908-v4", "issue908-v5"] CONFIGS = [ IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') attributes=exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend([ Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), ]) #exp.add_absolute_report_step() exp.add_comparison_table_step(attributes=attributes) exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) exp.run_steps()
2,119
Python
32.124999
164
0.750826
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/parser.py
#! /usr/bin/env python from lab.parser import Parser parser = Parser() parser.add_pattern('generator_computation_time', 'generator computation time: (.+)s', required=False, type=float) parser.add_pattern('cpdbs_computation_time', 'Canonical PDB heuristic computation time: (.+)s', required=False, type=float) parser.add_pattern('dominance_pruning_time', 'Dominance pruning took (.+)s', required=False, type=float) parser.parse()
432
Python
38.363633
123
0.75
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print("Make scatter plot for", name) algo1 = get_algo_nick(rev1, config_nick) algo2 = get_algo_nick(rev2, config_nick) report = report_class( filter_algorithm=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"]) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,743
Python
36.42132
82
0.61982
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/v6.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue908-base", "issue908-v6"] CONFIGS = [ IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), IssueConfig("cpdbs-sys3", ['--search', 'astar(cpdbs(systematic(3)))']), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') attributes=exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend([ Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), ]) #exp.add_absolute_report_step() exp.add_comparison_table_step(attributes=attributes) exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) exp.run_steps()
2,197
Python
32.815384
164
0.746472
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue908-base", "issue908-v1"] CONFIGS = [ IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_parse_again_step() attributes=exp.DEFAULT_TABLE_ATTRIBUTES attributes.append( Attribute('computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), ) #exp.add_absolute_report_step() exp.add_comparison_table_step(attributes=attributes) exp.add_scatter_plot_step(relative=True, attributes=["search_time", "total_time"]) exp.run_steps()
1,854
Python
28.444444
93
0.74973
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in range(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise ValueError("not supported") else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
Python
35.566037
78
0.59871
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue908-base", "issue908-v3"] CONFIGS = [ IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') attributes=exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend([ Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), ]) #exp.add_absolute_report_step() exp.add_comparison_table_step(attributes=attributes) exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) exp.run_steps()
2,121
Python
32.156249
164
0.751061
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue908/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue908-base", "issue908-v2"] CONFIGS = [ IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') attributes=exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend([ Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), ]) #exp.add_absolute_report_step() exp.add_comparison_table_step(attributes=attributes) exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) exp.run_steps()
2,121
Python
32.156249
164
0.751061
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue439/issue439.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import configs, suites import common_setup SEARCH_REVS = ["issue439-base", "issue439-v1"] LIMITS = {"search_time": 300} SUITE = suites.suite_satisficing_with_ipc11() configs_satisficing_core = configs.configs_satisficing_core() CONFIGS = {} for name in ["eager_greedy_add", "eager_greedy_ff", "lazy_greedy_add", "lazy_greedy_ff"]: CONFIGS[name] = configs_satisficing_core[name] exp = common_setup.IssueExperiment( revisions=SEARCH_REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_report(common_setup.RegressionReport( revision_nicks=exp.revision_nicks, config_nicks=CONFIGS.keys())) exp()
779
Python
21.941176
61
0.695764
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue439/regressions.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import configs import common_setup SEARCH_REVS = ["issue439-base", "issue439-v1"] LIMITS = {"search_time": 1800} SUITE = [ "airport:p45-airport5MUC-p6.pddl", "elevators-sat08-strips:p22.pddl", "parking-sat11-strips:pfile09-033.pddl", "scanalyzer-08-strips:p30.pddl", "transport-sat11-strips:p14.pddl", "transport-sat11-strips:p16.pddl", "trucks:p19.pddl", "trucks-strips:p23.pddl", ] configs_satisficing_core = configs.configs_satisficing_core() CONFIGS = {} for name in ["eager_greedy_add", "eager_greedy_ff", "lazy_greedy_add", "lazy_greedy_ff"]: CONFIGS[name] = configs_satisficing_core[name] exp = common_setup.IssueExperiment( revisions=SEARCH_REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_search_parser("custom-parser.py") attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"] exp.add_absolute_report_step(attributes=attributes) exp.add_comparison_table_step(attributes=attributes) exp.add_report(common_setup.RegressionReport( revision_nicks=exp.revision_nicks, config_nicks=CONFIGS.keys(), attributes=attributes)) exp()
1,209
Python
25.304347
68
0.701406
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue439/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.reports import Table from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports import PlanningReport from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. 
:: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. 
kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print("Make scatter plot for", name) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots)) class RegressionReport(PlanningReport): """ Compare revisions for tasks on which the first revision performs better than other revisions. *revision_nicks* must be a list of revision nicknames, e.g. ["default", "issue123"]. *config_nicks* must be a list of configuration nicknames, e.g. ["eager_greedy_ff", "eager_greedy_add"]. *regression_attribute* is the attribute that we compare between different revisions. It defaults to "coverage". Example comparing search_time for tasks where we lose coverage:: exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], config_nicks=["eager_greedy_ff"], regression_attribute="coverage", attributes="search_time")) """ def __init__(self, revision_nicks, config_nicks, regression_attribute="coverage", **kwargs): PlanningReport.__init__(self, **kwargs) assert revision_nicks self.revision_nicks = revision_nicks assert config_nicks self.config_nicks = config_nicks self.regression_attribute = regression_attribute def get_markup(self): tables = [] for (domain, problem) in self.problems: for config_nick in self.config_nicks: runs = [self.runs[(domain, problem, rev + "-" + config_nick)] for rev in self.revision_nicks] if any(runs[0][self.regression_attribute] > runs[i][self.regression_attribute] for i in range(1, len(self.revision_nicks))): print("\"%s:%s\"," % (domain, problem)) table = Table() for rev, run in zip(self.revision_nicks, runs): for attr in self.attributes: table.add_cell(rev, attr, run.get(attr)) table_name = ":".join((domain, problem, config_nick)) tables.append((table_name, table)) return "\n".join(name + "\n" + str(table) for name, table in tables)
14,920
Python
36.3025
79
0.601475
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue439/issue439-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import configs, suites import common_setup SEARCH_REVS = ["issue439-base", "issue439-v1"] LIMITS = {"search_time": 1800} SUITE = suites.suite_satisficing_with_ipc11() configs_satisficing_core = configs.configs_satisficing_core() CONFIGS = {} for name in ["eager_greedy_add", "eager_greedy_ff", "lazy_greedy_add", "lazy_greedy_ff"]: CONFIGS[name] = configs_satisficing_core[name] exp = common_setup.IssueExperiment( revisions=SEARCH_REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_search_parser("custom-parser.py") attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"] exp.add_absolute_report_step(attributes=attributes) exp.add_comparison_table_step(attributes=attributes) exp.add_report(common_setup.RegressionReport( revision_nicks=exp.revision_nicks, config_nicks=CONFIGS.keys(), attributes=attributes)) exp()
960
Python
24.972972
68
0.716667
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue439/custom-parser.py
#! /usr/bin/env python from lab.parser import Parser class CustomParser(Parser): def __init__(self): Parser.__init__(self) self.add_pattern( "init_time", "Best heuristic value: \d+ \[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", required=True, type=float) if __name__ == "__main__": parser = CustomParser() print("Running custom parser") parser.parse()
441
Python
21.099999
90
0.54195
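The pattern in custom-parser.py can be checked without running lab at all; the sketch below applies it to a sample search log line with the standard re module. The sample line is an assumption about the planner's output format, not copied from a real run.

import re

INIT_TIME_PATTERN = (
    r"Best heuristic value: \d+ "
    r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]")
sample_line = (
    "Best heuristic value: 12 "
    "[g=0, 1 evaluated, 0 expanded, t=0.03s, 3456 KB]")

match = re.search(INIT_TIME_PATTERN, sample_line)
if match:
    init_time = float(match.group(1))  # the value lab would store as "init_time"
    print(init_time)                   # -> 0.03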
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue540/v1-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue540-base", "issue540-v1"] LIMITS = {"search_time": 300} SUITE = suites.suite_optimal_with_ipc11() CONFIGS = { "blind": ["--search", "astar(blind())"], "ipdb": ["--search", "astar(ipdb(max_time=150))"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
491
Python
17.923076
54
0.635438
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue540/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,755
Python
35.135977
79
0.610349
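The constructor documented above turns *revisions* and *search_revisions* into checkout combinations; the following standalone sketch uses plain tuples instead of the Translator/Preprocessor/Planner objects to show which combinations each argument produces. The revision names are made up, and the real code additionally attaches a nick to each part.

revisions = ["issue540-base", "issue540-v1"]
search_revisions = ["default", "issue540-v1"]

combinations = []

# *revisions*: translator, preprocessor and search all use the same revision.
for rev in revisions:
    combinations.append(
        (("translator", rev), ("preprocessor", rev), ("planner", rev)))

# *search_revisions*: translator and preprocessor are pinned to the first
# revision; only the search component varies.
base_rev = search_revisions[0]
for rev in search_revisions:
    combinations.append(
        (("translator", base_rev), ("preprocessor", base_rev), ("planner", rev)))

for combo in combinations:
    print(combo)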
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue540/v1-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue540-base", "issue540-v1"] LIMITS = {"search_time": 300} SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = { "blind": ["--search", "astar(blind())"], "lama-first": [ "--if-unit-cost", "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", "--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm])", "--if-non-unit-cost", "--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", "--search", "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],cost_type=one,reopen_closed=false)", "--always"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
940
Python
24.432432
97
0.58617
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/v8-sat-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["release32", "release64"] CONFIG_DICT = { "eager_greedy_ff": [ "--evaluator", "h=ff()", "--search", "eager_greedy([h], preferred=[h])"], "eager_greedy_cea": [ "--evaluator", "h=cea()", "--search", "eager_greedy([h], preferred=[h])"], "lazy_greedy_add": [ "--evaluator", "h=add()", "--search", "lazy_greedy([h], preferred=[h])"], "lazy_greedy_cg": [ "--evaluator", "h=cg()", "--search", "lazy_greedy([h], preferred=[h])"], "lama-first": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], "lama-first-typed": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", "lazy(alt([single(hff), single(hff, pref_only=true)," "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," "preferred_successors_first=false)"], } CONFIGS = [ IssueConfig( "-".join([config_nick, build]), config, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "30m"]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_DICT.items() ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick in CONFIG_DICT.keys()] outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), outfile=outfile) exp.add_step( 'publish-report', subprocess.call, ['publish', outfile]) exp.run_steps()
3,818
Python
32.208695
103
0.625196
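The 32-bit vs. 64-bit comparison above builds its algorithm pairs with format(**locals()) inside a list comprehension, which relies on the comprehension sharing the enclosing scope (as in Python 2). The standalone sketch below produces the same pair names but passes the pieces explicitly, so it also runs under Python 3; the config nicks are a made-up subset.

import itertools

REVISIONS = ["issue213-v8"]
BUILDS = ["release32", "release64"]
CONFIG_NICKS = ["eager_greedy_ff", "lama-first"]  # subset, for brevity

for build1, build2 in itertools.combinations(BUILDS, 2):
    for rev in REVISIONS:
        algorithm_pairs = [
            ("{}-{}-{}".format(rev, config_nick, build1),
             "{}-{}-{}".format(rev, config_nick, build2),
             "Diff ({}-{})".format(config_nick, rev))
            for config_nick in CONFIG_NICKS]
        for pair in algorithm_pairs:
            print(pair)

# Prints, e.g.:
# ('issue213-v8-eager_greedy_ff-release32',
#  'issue213-v8-eager_greedy_ff-release64',
#  'Diff (eager_greedy_ff-issue213-v8)')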
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue213/base-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISION = "issue213-base" BUILDS = ["release32", "release64"] SEARCHES = [ ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), ("blind", "astar(blind())"), ("cegar", "astar(cegar())"), ("divpot", "astar(diverse_potentials())"), ("ipdb", "astar(ipdb(max_time=900))"), ("lmcut", "astar(lmcut())"), ("mas", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false), " "merge_strategy=merge_dfp(), " "label_reduction=exact(before_shrinking=true, before_merging=false), max_states=100000, threshold_before_merge=1))"), ("seq", "astar(operatorcounting([state_equation_constraints()]))"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=[REVISION], configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() algorithm_pairs = [] for build1, build2 in itertools.combinations(BUILDS, 2): for config_nick, search in SEARCHES: algorithm_pairs.append( ("{REVISION}-{config_nick}-{build1}".format(**locals()), "{REVISION}-{config_nick}-{build2}".format(**locals()), "Diff ({})".format(config_nick))) exp.add_report( ComparativeReport( algorithm_pairs, attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), name="issue213-opt-comparison") #exp.add_scatter_plot_step(attributes=["total_time", "memory"]) exp.run_steps()
2,257
Python
30.361111
125
0.666371