Dataset columns (per-file records follow below):
  repo             string   length 2 to 99
  file             string   length 13 to 225
  code             string   length 0 to 18.3M
  file_length      int64    0 to 18.3M
  avg_line_length  float64  0 to 1.36M
  max_line_length  int64    0 to 4.26M
  extension_type   string   1 distinct value
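The three length statistics can be recomputed directly from the raw file text. The sketch below is illustrative only: the helper name and sample string are made up and are not part of the dataset tooling; it assumes file_length counts characters, avg_line_length averages characters per line, and max_line_length takes the longest line.

# Illustrative sketch: recompute the per-file statistics from a code string.
# The helper and the sample string are made up for demonstration.
def file_stats(code):
    lines = code.splitlines() or [""]
    return {
        "file_length": len(code),  # total characters (assumed definition)
        "avg_line_length": sum(len(line) for line in lines) / float(len(lines)),
        "max_line_length": max(len(line) for line in lines),
    }

sample = "#! /usr/bin/env python\nprint('hello')\n"
print(file_stats(sample))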
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue436/opt-v1.py
code:
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import configs
import common_setup


REVS = ["issue436-base", "issue436-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()

configs_optimal_core = configs.configs_optimal_core()
CONFIGS = {}
for name in ['astar_merge_and_shrink_greedy_bisim',
             'astar_merge_and_shrink_dfp_bisim',
             'astar_ipdb', 'astar_hmax', 'astar_blind', 'astar_lmcut',
             'astar_merge_and_shrink_bisim',
             'astar_lmcount_lm_merged_rhw_hm']:
    CONFIGS[name] = configs_optimal_core[name]

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp()
file_length: 780
avg_line_length: 22.666667
max_line_length: 87
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue436/common_setup.py
code:
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
file_length: 12,755
avg_line_length: 35.135977
max_line_length: 79
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue436/sat-v2.py
code:
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import configs
import common_setup


REVS = ["issue436-base", "issue436-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()

default_configs_satisficing = configs.default_configs_satisficing(extended=True)
CONFIGS = {}
for name in ['lazy_greedy_add', 'eager_greedy_ff', 'eager_greedy_add', 'lazy_greedy_ff', 'pareto_ff']:
    CONFIGS[name] = default_configs_satisficing[name]

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=['total_time', 'memory'])

exp()
file_length: 749
avg_line_length: 22.4375
max_line_length: 102
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue436/configs.py
code:
def configs_optimal_core(): return { # A* "astar_blind": [ "--search", "astar(blind)"], "astar_h2": [ "--search", "astar(hm(2))"], "astar_ipdb": [ "--search", "astar(ipdb)"], "astar_lmcount_lm_merged_rhw_hm": [ "--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], "astar_lmcut": [ "--search", "astar(lmcut)"], "astar_hmax": [ "--search", "astar(hmax)"], "astar_merge_and_shrink_bisim": [ "--search", "astar(merge_and_shrink(" "merge_strategy=merge_linear(variable_order=reverse_level)," "shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false," "group_by_h=true)))"], "astar_merge_and_shrink_greedy_bisim": [ "--search", "astar(merge_and_shrink(" "merge_strategy=merge_linear(variable_order=reverse_level)," "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1," "greedy=true,group_by_h=false)))"], "astar_merge_and_shrink_dfp_bisim": [ "--search", "astar(merge_and_shrink(merge_strategy=merge_dfp," "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1," "greedy=false,group_by_h=true)))"], "astar_selmax_lmcut_lmcount": [ "--search", "astar(selmax([lmcut(),lmcount(lm_merged([lm_hm(m=1),lm_rhw()])," "admissible=true)],training_set=1000),mpd=true)"], } def configs_satisficing_core(): return { # A* "astar_goalcount": [ "--search", "astar(goalcount)"], # eager greedy "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_add": [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_cg": [ "--heuristic", "h=cg()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_cea": [ "--heuristic", "h=cea()", "--search", "eager_greedy(h, preferred=h)"], # lazy greedy "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_add": [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_cg": [ "--heuristic", "h=cg()", "--search", "lazy_greedy(h, preferred=h)"], } def configs_optimal_ipc(): return { "seq_opt_merge_and_shrink": ["ipc", "seq-opt-merge-and-shrink"], "seq_opt_fdss_1": ["ipc", "seq-opt-fdss-1"], "seq_opt_fdss_2": ["ipc", "seq-opt-fdss-2"], } def configs_satisficing_ipc(): return { "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], "seq_sat_fdss_1": ["ipc", "seq-sat-fdss-1"], "seq_sat_fdss_2": ["ipc", "seq-sat-fdss-2"], } def configs_optimal_extended(): return { # A* "astar_lmcount_lm_merged_rhw_hm_no_order": [ "--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], } def configs_satisficing_extended(): return { # eager greedy "eager_greedy_alt_ff_cg": [ "--heuristic", "hff=ff()", "--heuristic", "hcg=cg()", "--search", "eager_greedy(hff,hcg,preferred=[hff,hcg])"], "eager_greedy_ff_no_pref": [ "--search", "eager_greedy(ff())"], # lazy greedy "lazy_greedy_alt_cea_cg": [ "--heuristic", "hcea=cea()", "--heuristic", "hcg=cg()", "--search", "lazy_greedy(hcea,hcg,preferred=[hcea,hcg])"], "lazy_greedy_ff_no_pref": [ "--search", "lazy_greedy(ff())"], "lazy_greedy_cea": [ "--heuristic", "h=cea()", "--search", "lazy_greedy(h, preferred=h)"], # lazy wA* "lazy_wa3_ff": [ "--heuristic", "h=ff()", "--search", "lazy_wastar(h,w=3,preferred=h)"], # eager wA* "eager_wa3_cg": [ "--heuristic", "h=cg()", "--search", "eager(single(sum([g(),weight(h,3)])),preferred=h)"], # ehc "ehc_ff": [ "--search", "ehc(ff())"], # iterated "iterated_wa_ff": [ "--heuristic", "h=ff()", "--search", "iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3)," 
"lazy_wastar(h,w=2), lazy_wastar(h,w=1)])"], # pareto open list "pareto_ff": [ "--heuristic", "h=ff()", "--search", "eager(pareto([sum([g(), h]), h]), reopen_closed=true, pathmax=false," "f_eval=sum([g(), h]))"], # bucket-based open list "bucket_lmcut": [ "--heuristic", "h=lmcut()", "--search", "eager(single_buckets(h), reopen_closed=true, pathmax=false)"], } def default_configs_optimal(core=True, ipc=True, extended=False): configs = {} if core: configs.update(configs_optimal_core()) if ipc: configs.update(configs_optimal_ipc()) if extended: configs.update(configs_optimal_extended()) return configs def default_configs_satisficing(core=True, ipc=True, extended=False): configs = {} if core: configs.update(configs_satisficing_core()) if ipc: configs.update(configs_satisficing_ipc()) if extended: configs.update(configs_satisficing_extended()) return configs
file_length: 6,207
avg_line_length: 29.282927
max_line_length: 89
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue671/v1.py
code:
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

import suites

from lab.reports import Attribute, gm

from common_setup import IssueConfig, IssueExperiment

try:
    from relativescatter import RelativeScatterPlotReport
    matplotlib = True
except ImportError:
    print 'matplotlib not available, scatter plots not available'
    matplotlib = False


def main(revisions=None):
    benchmarks_dir = os.path.expanduser('~/repos/downward/benchmarks')
    suite = suites.suite_all()

    configs = {
        IssueConfig('blind', ['--search', 'astar(blind())'], driver_options=['--search-time-limit', '60s']),
        IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first', '--search-time-limit', '60s']),
    }

    exp = IssueExperiment(
        benchmarks_dir=benchmarks_dir,
        suite=suite,
        revisions=revisions,
        configs=configs,
        test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'],
        processes=4,
        email='[email protected]',
    )

    attributes = exp.DEFAULT_TABLE_ATTRIBUTES
    attributes.append('translator_*')

    exp.add_comparison_table_step()

    if matplotlib:
        for attribute in ["memory", "total_time"]:
            for config in configs:
                exp.add_report(
                    RelativeScatterPlotReport(
                        attributes=[attribute],
                        filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
                        get_category=lambda run1, run2: run1.get("domain"),
                    ),
                    outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
                )

    exp()


main(revisions=['issue671-base', 'issue671-v1'])
file_length: 1,711
avg_line_length: 30.127273
max_line_length: 110
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue671/suites.py
code:
#! /usr/bin/env python # -*- coding: utf-8 -*- import argparse import textwrap HELP = "Convert suite name to list of domains or tasks." def suite_alternative_formulations(): return ['airport-adl', 'no-mprime', 'no-mystery'] def suite_ipc98_to_ipc04_adl(): return [ 'assembly', 'miconic-fulladl', 'miconic-simpleadl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'schedule', ] def suite_ipc98_to_ipc04_strips(): return [ 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', 'satellite', 'zenotravel', ] def suite_ipc98_to_ipc04(): # All IPC1-4 domains, including the trivial Movie. return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) def suite_ipc06_adl(): return [ 'openstacks', 'pathways', 'trucks', ] def suite_ipc06_strips_compilations(): return [ 'openstacks-strips', 'pathways-noneg', 'trucks-strips', ] def suite_ipc06_strips(): return [ 'pipesworld-tankage', 'rovers', 'storage', 'tpp', ] def suite_ipc06(): return sorted(suite_ipc06_adl() + suite_ipc06_strips()) def suite_ipc08_common_strips(): return [ 'parcprinter-08-strips', 'pegsol-08-strips', 'scanalyzer-08-strips', ] def suite_ipc08_opt_adl(): return ['openstacks-opt08-adl'] def suite_ipc08_opt_strips(): return sorted(suite_ipc08_common_strips() + [ 'elevators-opt08-strips', 'openstacks-opt08-strips', 'sokoban-opt08-strips', 'transport-opt08-strips', 'woodworking-opt08-strips', ]) def suite_ipc08_opt(): return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) def suite_ipc08_sat_adl(): return ['openstacks-sat08-adl'] def suite_ipc08_sat_strips(): return sorted(suite_ipc08_common_strips() + [ # Note: cyber-security is missing. 
'elevators-sat08-strips', 'openstacks-sat08-strips', 'sokoban-sat08-strips', 'transport-sat08-strips', 'woodworking-sat08-strips', ]) def suite_ipc08_sat(): return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) def suite_ipc08(): return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) def suite_ipc11_opt(): return [ 'barman-opt11-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'nomystery-opt11-strips', 'openstacks-opt11-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'pegsol-opt11-strips', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'tidybot-opt11-strips', 'transport-opt11-strips', 'visitall-opt11-strips', 'woodworking-opt11-strips', ] def suite_ipc11_sat(): return [ 'barman-sat11-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'nomystery-sat11-strips', 'openstacks-sat11-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'pegsol-sat11-strips', 'scanalyzer-sat11-strips', 'sokoban-sat11-strips', 'tidybot-sat11-strips', 'transport-sat11-strips', 'visitall-sat11-strips', 'woodworking-sat11-strips', ] def suite_ipc11(): return sorted(suite_ipc11_opt() + suite_ipc11_sat()) def suite_ipc14_agl_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_agl_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-agl14-strips', 'openstacks-agl14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_agl(): return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) def suite_ipc14_mco_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_mco_strips(): return [ 'barman-mco14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_mco(): return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) def suite_ipc14_opt_adl(): return [ 'cavediving-14-adl', 'citycar-opt14-adl', 'maintenance-opt14-adl', ] def suite_ipc14_opt_strips(): return [ 'barman-opt14-strips', 'childsnack-opt14-strips', 'floortile-opt14-strips', 'ged-opt14-strips', 'hiking-opt14-strips', 'openstacks-opt14-strips', 'parking-opt14-strips', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips', 'visitall-opt14-strips', ] def suite_ipc14_opt(): return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) def suite_ipc14_sat_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_sat_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_sat(): return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) def suite_ipc14(): return sorted(set( suite_ipc14_agl() + suite_ipc14_mco() + suite_ipc14_opt() + suite_ipc14_sat())) def suite_unsolvable(): return sorted( ['mystery:prob%02d.pddl' % index for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) def suite_optimal_adl(): return sorted( suite_ipc98_to_ipc04_adl() + 
suite_ipc06_adl() + suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) def suite_optimal_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + suite_ipc11_opt() + suite_ipc14_opt_strips()) def suite_optimal(): return sorted(suite_optimal_adl() + suite_optimal_strips()) def suite_satisficing_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) def suite_satisficing_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + suite_ipc11_sat() + suite_ipc14_sat_strips()) def suite_satisficing(): return sorted(suite_satisficing_adl() + suite_satisficing_strips()) def suite_all(): return sorted( suite_ipc98_to_ipc04() + suite_ipc06() + suite_ipc06_strips_compilations() + suite_ipc08() + suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("suite", help="suite name") return parser.parse_args() def main(): prefix = "suite_" suite_names = [ name[len(prefix):] for name in sorted(globals().keys()) if name.startswith(prefix)] parser = argparse.ArgumentParser(description=HELP) parser.add_argument("suite", choices=suite_names, help="suite name") parser.add_argument( "--width", default=72, type=int, help="output line width (default: %(default)s). Use 1 for single " "column.") args = parser.parse_args() suite_func = globals()[prefix + args.suite] print(textwrap.fill( str(suite_func()), width=args.width, break_long_words=False, break_on_hyphens=False)) if __name__ == "__main__": main()
file_length: 8,551
avg_line_length: 23.364672
max_line_length: 77
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue671/common_setup.py
code:
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=None, **kwargs): """ If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(benchmarks_dir, suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
file_length: 12,496
avg_line_length: 33.907821
max_line_length: 83
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue671/relativescatter.py
code:
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter(axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute in two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
file_length: 3,921
avg_line_length: 35.654206
max_line_length: 84
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue781/v2.py
code:
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue781-v2"]
CONFIGS = [
    IssueConfig(
        "{heuristic}-{pruning}-min-{min_ratio}".format(**locals()),
        ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}(min_required_pruning_ratio={min_ratio}))".format(**locals())])
    for heuristic in ["blind", "lmcut"]
    for pruning in ["ec", "queue", "simple"]
    for min_ratio in [0.0, 0.2]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)
exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py"))

exp.add_absolute_report_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"])
#exp.add_comparison_table_step()

exp.run_steps()
file_length: 1,693
avg_line_length: 32.215686
max_line_length: 137
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue781/parser.py
code:
#! /usr/bin/env python

from lab.parser import Parser


def main():
    print 'Running custom parser'
    parser = Parser()
    parser.add_pattern('time_for_pruning_operators', r'^Time for pruning operators: (.+)s$', type=float, flags="M")
    parser.parse()


main()
file_length: 268
avg_line_length: 18.214286
max_line_length: 115
extension_type: py
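The parser above registers a single multi-line regular expression for the pruning-time attribute. The snippet below is illustrative only: the log line and its value are invented to show the kind of planner output the pattern is meant to match.

# Illustrative only: the log excerpt and value are made up; this just shows
# what the pattern registered in parser.py matches.
import re

pattern = re.compile(r'^Time for pruning operators: (.+)s$', re.M)
log_excerpt = "Time for pruning operators: 0.53s\n"
match = pattern.search(log_excerpt)
print(float(match.group(1)))  # 0.53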
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue781/v2-v3-combined.py
code:
#! /usr/bin/env python

from collections import defaultdict
import os.path
import sys

import common_setup


FILE = os.path.abspath(__file__)
DIR = os.path.dirname(FILE)
FILENAME = os.path.splitext(os.path.basename(__file__))[0]
EXPS = os.path.join(DIR, "data")
EXPPATH = os.path.join(EXPS, FILENAME)


def remove_file(filename):
    try:
        os.remove(filename)
    except OSError:
        pass


exp = common_setup.IssueExperiment()
exp.steps = []

exp.add_step(
    'remove-combined-properties',
    remove_file,
    os.path.join(exp.eval_dir, "properties"))

exp.add_fetcher(os.path.join(EXPS, "issue781-v2-eval"), merge=True)
exp.add_fetcher(os.path.join(EXPS, "issue781-v3-queue-ratio-eval"), merge=True)

ATTRIBUTES = [
    "cost", "error", "run_dir", "search_start_time", "search_start_memory",
    "coverage", "expansions_until_last_jump", "total_time",
    "initial_h_value", "search_time", "abstractions", "stored_heuristics",
    "stored_values", "stored_lookup_tables",
]

exp.add_absolute_report_step(
    filter_algorithm=[
        "issue781-v2-blind-ec-min-0.0",
        "issue781-v2-blind-ec-min-0.2",
        "issue781-v2-blind-queue-min-0.0",
        "issue781-v3-blind-queue-min-0.2",
        "issue781-v2-blind-simple-min-0.0",
        "issue781-v2-blind-simple-min-0.2",
        "issue781-v2-lmcut-ec-min-0.0",
        "issue781-v2-lmcut-ec-min-0.2",
        "issue781-v2-lmcut-queue-min-0.0",
        "issue781-v3-lmcut-queue-min-0.2",
        "issue781-v2-lmcut-simple-min-0.0",
        "issue781-v2-lmcut-simple-min-0.2"],
    attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"])

exp.run_steps()
file_length: 1,668
avg_line_length: 29.345455
max_line_length: 102
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue781/v1-blind.py
code:
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue781-v1"]
CONFIGS = [
    IssueConfig(
        heuristic + "-" + pruning,
        ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}())".format(**locals())])
    for heuristic in ["blind"]
    for pruning in ["ec", "queue", "simple"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)

exp.add_absolute_report_step()
#exp.add_comparison_table_step()

exp.run_steps()
file_length: 1,401
avg_line_length: 28.208333
max_line_length: 99
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue781/v4-extensions.py
code:
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue781-v3", "issue781-v4-wss", "issue781-v4-varmark", "issue781-v4-opportunistic"]
CONFIGS = [
    IssueConfig(
        "{heuristic}-{pruning}".format(**locals()),
        ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}())".format(**locals())])
    for heuristic in ["blind", "lmcut"]
    for pruning in ["queue"]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)
exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py"))

exp.add_absolute_report_step(
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"])

exp.add_report(common_setup.ComparativeReport([
    ("issue781-v3-{heuristic}-queue".format(**locals()),
     "issue781-v4-{extension}-{heuristic}-queue".format(**locals()))
    for heuristic in ["blind", "lmcut"]
    for extension in ["wss", "varmark", "opportunistic"]],
    attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"]))

exp.run_steps()
file_length: 2,009
avg_line_length: 35.545455
max_line_length: 99
extension_type: py
repo: DAAISy
file: DAAISy-main/dependencies/FD/experiments/issue781/common_setup.py
code:
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") revisions = revisions or [] configs = configs or [] for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,222
35.84715
82
py
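The comparison-table and scatter-plot steps defined in the common_setup code above derive all report columns and output file names from revision/config string pairs. Below is a minimal standalone sketch of that naming scheme; it uses plain strings and itertools only, and the revision and config names are invented for illustration.

# Standalone sketch (no lab/downward imports) of how the comparison-table
# step above pairs up revisions and config nicks. The revision and config
# names below are made-up examples.
import itertools

revisions = ["issue123-base", "issue123-v1"]
config_nicks = ["astar_blind", "lazy_greedy_ff"]

for rev1, rev2 in itertools.combinations(revisions, 2):
    compared_configs = [
        ("%s-%s" % (rev1, nick),
         "%s-%s" % (rev2, nick),
         "Diff (%s)" % nick)
        for nick in config_nicks]
    # One report file per revision pair, one column triple per config.
    outfile = "%s-%s-%s-compare.html" % ("exp", rev1, rev2)
    print(outfile)
    for algo1, algo2, diff_col in compared_configs:
        print("  %s vs %s -> column %r" % (algo1, algo2, diff_col))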
DAAISy
DAAISy-main/dependencies/FD/experiments/issue781/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
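RelativeScatterPlotReport above plots x = attribute value under algorithm 1 and y = value under algorithm 2 divided by the value under algorithm 1, then widens the y-limits so the plot is symmetric around 1. The following is a small self-contained sketch of that arithmetic; the attribute values are invented, whereas in the report they come from paired runs.

# Minimal sketch of the relative-scatter arithmetic used above.
# (algo1_value, algo2_value) pairs are invented example data.
pairs = [(10.0, 12.5), (200.0, 80.0), (3.0, 3.0)]

points = []
ylim_bottom, ylim_top = 2.0, 0.5
for val1, val2 in pairs:
    assert val1 > 0 and val2 > 0
    x = val1                  # absolute value under algorithm 1
    y = val2 / float(val1)    # relative factor of algorithm 2
    points.append((x, y))
    ylim_top = max(ylim_top, y)
    ylim_bottom = min(ylim_bottom, y)

# Center the y-range around 1 so factors c and 1/c look symmetric.
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))

print(points)               # [(10.0, 1.25), (200.0, 0.4), (3.0, 1.0)]
print((ylim_bottom, ylim_top))   # (0.4, 2.5)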
DAAISy
DAAISy-main/dependencies/FD/experiments/issue781/v3-queue-ratio.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue781-v3"] CONFIGS = [ IssueConfig( "{heuristic}-{pruning}-min-{min_ratio}".format(**locals()), ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}(min_required_pruning_ratio={min_ratio}))".format(**locals())]) for heuristic in ["blind", "lmcut"] for pruning in ["queue"] for min_ratio in [0.2] ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) #exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py")) exp.add_absolute_report_step( attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"]) #exp.add_comparison_table_step() exp.run_steps()
1,672
31.803922
137
py
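The CONFIGS comprehension in v3-queue-ratio.py above builds one IssueConfig per (heuristic, pruning, min_ratio) combination. Below is a standalone sketch of how those nicks and search strings expand; it reuses the same parameter lists but handles plain strings only, without lab imports.

# Sketch of how the CONFIGS comprehension above expands into nicks and
# --search strings; only string formatting happens here.
for heuristic in ["blind", "lmcut"]:
    for pruning in ["queue"]:
        for min_ratio in [0.2]:
            nick = "{heuristic}-{pruning}-min-{min_ratio}".format(**locals())
            search = ("astar({heuristic}(), pruning=stubborn_sets_{pruning}("
                      "min_required_pruning_ratio={min_ratio}))".format(**locals()))
            print(nick)                      # e.g. blind-queue-min-0.2
            print("  --search " + search)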
DAAISy
DAAISy-main/dependencies/FD/experiments/issue635/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from common_setup import IssueConfig, IssueExperiment, is_test_run BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue635-base", "issue635-v2"] CONFIGS = [ IssueConfig( heuristic, ["--search", "astar({})".format(heuristic)], driver_options=["--search-time-limit", "10m"]) for heuristic in ["hm(m=2)"] ] SUITE = [ 'airport', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips', 'woodworking-opt11-strips', 'zenotravel'] ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"]) exp()
1,787
32.735849
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue635/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from common_setup import IssueConfig, IssueExperiment, is_test_run BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue635-base", "issue635-v1"] CONFIGS = [ IssueConfig( heuristic, ["--search", "astar({})".format(heuristic)], driver_options=["--search-time-limit", "10m"]) for heuristic in ["hm(m=2)", "ipdb()", "cea()", "cg()"] ] SUITE = [ 'airport', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips', 'woodworking-opt11-strips', 'zenotravel'] ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"]) exp()
1,814
33.245283
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue635/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." 
+ report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step( 'publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step( "publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step(step_name, make_scatter_plots))
11,446
33.068452
79
py
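IssueExperiment in the common_setup module above registers one algorithm per revision/config pair via get_algo_nick. Here is a self-contained sketch of that expansion with stand-in data (no downward imports); the revisions and config nicks are the ones used by the issue635 v1 script, and the real code calls self.add_algorithm(...) for each pair.

# Sketch of the algorithm-name expansion done in IssueExperiment.__init__.
def get_algo_nick(revision, config_nick):
    return "{revision}-{config_nick}".format(**locals())

revisions = ["issue635-base", "issue635-v1"]
config_nicks = ["hm(m=2)", "ipdb()", "cea()", "cg()"]

for rev in revisions:
    for nick in config_nicks:
        print(get_algo_nick(rev, nick))   # e.g. issue635-base-hm(m=2)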
DAAISy
DAAISy-main/dependencies/FD/experiments/issue635/v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from common_setup import IssueConfig, IssueExperiment, is_test_run BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue635-base", "issue635-v3"] CONFIGS = [ IssueConfig( heuristic, ["--search", "astar({})".format(heuristic)], driver_options=["--search-time-limit", "10m"]) for heuristic in ["hm(m=2)"] ] SUITE = [ 'airport', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips', 'woodworking-opt11-strips', 'zenotravel'] ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"]) exp()
1,787
32.735849
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue635/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows how a specific attribute differs
    between two configurations. The attribute value in config 1 is shown
    on the x-axis and the relation to the value in config 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise "not supported"
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['config'] == self.configs[0] and
                    run2['config'] == self.configs[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.configs[0], val1)
            assert val2 > 0, (domain, problem, self.configs[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlots use log-scaling on the x-axis by default.
        default_xscale = 'log'
        if self.attribute and self.attribute in self.LINEAR:
            default_xscale = 'linear'
        PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
35.654206
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue839/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,786
36.435443
82
py
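In this common_setup variant, whether the full benchmark suite or the small test suite is used depends on the --test flag and on the host name suffix. The following is a small standalone sketch of that decision logic; the host names are invented examples.

# Sketch of the test-run decision used above: --test=auto falls back to a
# local test run unless the host looks like a Basel cluster node.
def is_running_on_cluster(node):
    return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")

def is_test_run(test_flag, node):
    return test_flag == "yes" or (
        test_flag == "auto" and not is_running_on_cluster(node))

for node in ["login12.cluster.bc2.ch", "my-laptop"]:
    for flag in ["yes", "no", "auto"]:
        print("%s --test=%s -> test run: %s" % (node, flag, is_test_run(flag, node)))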
DAAISy
DAAISy-main/dependencies/FD/experiments/issue839/v1-lama-first.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue839-v1"] BUILDS = ["release32"] CONFIG_NICKS = [ ("lama-first-syn", [ "--heuristic", """hlm=lama_synergy(lm_rhw(reasonable_orders=true), transform=adapt_costs(one))""", "--heuristic", "hff=ff_synergy(hlm)", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ("lama-first-no-syn-pref-false", [ "--heuristic", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref=false)", "--heuristic", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ("lama-first-no-syn-pref-true", [ "--heuristic", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref=true)", "--heuristic", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ] CONFIGS = [ IssueConfig( config_nick, config, build_options=[build], driver_options=["--build", build]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES for build in BUILDS: algorithm_pairs = [ ("{rev}-{nick1}".format(**locals()), "{rev}-{nick2}".format(**locals()), "Diff ({rev})".format(**locals())) for (nick1, _), (nick2, _) in itertools.combinations(CONFIG_NICKS, 2)] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue839-{nick1}-vs-{nick2}".format(**locals())) exp.run_steps()
3,160
32.62766
109
py
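The report section at the end of v1-lama-first.py above pairs up config nicks with itertools.combinations to build one comparative report per pair. Below is a standalone sketch of which algorithm pairs that produces for the three nicks defined in the script; it works on plain strings and does not construct the reports themselves.

# Sketch of the algorithm pairing at the end of v1-lama-first.py.
import itertools

rev = "issue839-v1"
config_nicks = [
    "lama-first-syn",
    "lama-first-no-syn-pref-false",
    "lama-first-no-syn-pref-true",
]

# Three nicks yield three pairwise comparisons.
for nick1, nick2 in itertools.combinations(config_nicks, 2):
    algo1 = "{rev}-{nick1}".format(**locals())
    algo2 = "{rev}-{nick2}".format(**locals())
    print("%s  vs  %s" % (algo1, algo2))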
DAAISy
DAAISy-main/dependencies/FD/experiments/issue839/v1-lama.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue839-v1"] BUILDS = ["release32"] CONFIG_NICKS = [ ("lama-syn", [ "--if-unit-cost", "--evaluator", "hlm=lama_synergy(lm_rhw(reasonable_orders=true))", "--evaluator", "hff=ff_synergy(hlm)", "--search", """iterated([ lazy_greedy([hff,hlm],preferred=[hff,hlm]), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) ],repeat_last=true,continue_on_fail=true)""", "--if-non-unit-cost", "--evaluator", "hlm1=lama_synergy(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))", "--evaluator", "hff1=ff_synergy(hlm1)", "--evaluator", "hlm2=lama_synergy(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", "--evaluator", "hff2=ff_synergy(hlm2)", "--search", """iterated([ lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], cost_type=one,reopen_closed=false), lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], reopen_closed=false), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) ],repeat_last=true,continue_on_fail=true)""", "--always"]), ] + [ ("lama-no-syn-pref-{pref}".format(**locals()), [ "--if-unit-cost", "--evaluator", "hlm=lmcount(lm_rhw(reasonable_orders=true), pref={pref})".format(**locals()), "--evaluator", "hff=ff()", "--search", """iterated([ lazy_greedy([hff,hlm],preferred=[hff,hlm]), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) ],repeat_last=true,continue_on_fail=true)""", "--if-non-unit-cost", "--evaluator", "hlm1=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref={pref})".format(**locals()), "--evaluator", "hff1=ff(transform=adapt_costs(one))", "--evaluator", "hlm2=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(plusone), pref={pref})".format(**locals()), "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", "--search", """iterated([ lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], cost_type=one,reopen_closed=false), lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], reopen_closed=false), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) ],repeat_last=true,continue_on_fail=true)""", "--always"]) for pref in [True, False] ] CONFIGS = [ IssueConfig( config_nick, config, build_options=[build], driver_options=["--build", build]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = 
LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES for build in BUILDS: algorithm_pairs = [ ("{rev}-{nick1}".format(**locals()), "{rev}-{nick2}".format(**locals()), "Diff ({rev})".format(**locals())) for (nick1, _), (nick2, _) in itertools.combinations(CONFIG_NICKS, 2)] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue839-{nick1}-vs-{nick2}".format(**locals())) exp.run_steps()
5,589
39.80292
119
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue839/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue554/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in self.get_supported_attributes( config_nick, attributes): make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,856
34.913408
79
py
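The older DownwardExperiment wrapper above turns revisions and search_revisions into (translator, preprocessor, planner) checkout triples; with search_revisions, every run shares the first revision for the translator and preprocessor and only the search component varies. Here is a standalone sketch of that expansion in which plain (component, revision) tuples stand in for the Translator/Preprocessor/Planner classes.

# Sketch of how "revisions" vs "search_revisions" expand into checkout
# triples in the wrapper above.
def combos_from_revisions(revisions):
    return [(("translator", rev), ("preprocessor", rev), ("planner", rev))
            for rev in revisions]

def combos_from_search_revisions(search_revisions):
    base_rev = search_revisions[0]
    return [(("translator", base_rev), ("preprocessor", base_rev), ("planner", rev))
            for rev in search_revisions]

print(combos_from_revisions(["issue554-base", "issue554-v1"]))
print(combos_from_search_revisions(["issue554-base", "issue554-v1"]))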
DAAISy
DAAISy-main/dependencies/FD/experiments/issue554/issue554.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup REVS = ["issue554-base", "issue554-v1"] LIMITS = {"search_time": 1800} SUITE = suites.suite_optimal_with_ipc11() CONFIGS = { "astar_hmax": ["--search", "astar(hmax())"], "gbfs_gc": ["--search", "eager_greedy(goalcount())"], } exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
498
18.96
57
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue387/common_setup.py
# -*- coding: utf-8 -*- import os.path from lab.environments import MaiaEnvironment from lab.steps import Step from downward.checkouts import Translator, Preprocessor, Planner from downward.experiments import DownwardExperiment from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the filename of the main script, e.g. "/ham/spam/eggs.py" => "eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Found by searching upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found.""" path = os.path.abspath(get_script_dir()) while True: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) class MyExperiment(DownwardExperiment): DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "total_time", "search_time", "memory", "expansions_until_last_jump", ] """Wrapper for DownwardExperiment with a few convenience features.""" def __init__(self, configs=None, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. If "configs" is specified, it should be a dict of {nick: cmdline} pairs that sets the planner configurations to test. If "grid_priority" is specified and no environment is specifically requested in **kwargs, use the maia environment with the specified priority. If "path" is not specified, the experiment data path is derived automatically from the main script's filename. If "repo" is not specified, the repository base is derived automatically from the main script's path. If "revisions" is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. If "search_revisions" is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All experiments use the translator and preprocessor component of the first revision. If "suite" is specified, it should specify a problem suite. 
Options "combinations" (from the base class), "revisions" and "search_revisions" are mutually exclusive.""" if grid_priority is not None and "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() num_rev_opts_specified = ( int(revisions is not None) + int(search_revisions is not None) + int(kwargs.get("combinations") is not None)) if num_rev_opts_specified > 1: raise ValueError('must specify exactly one of "revisions", ' '"search_revisions" or "combinations"') # See add_comparison_table_step for more on this variable. self._HACK_revisions = revisions if revisions is not None: if not revisions: raise ValueError("revisions cannot be empty") combinations = [(Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions] kwargs["combinations"] = combinations if search_revisions is not None: if not search_revisions: raise ValueError("search_revisions cannot be empty") base_rev = search_revisions[0] translator = Translator(repo, base_rev) preprocessor = Preprocessor(repo, base_rev) combinations = [(translator, preprocessor, Planner(repo, rev)) for rev in search_revisions] kwargs["combinations"] = combinations DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) if configs is not None: for nick, config in configs.items(): self.add_config(nick, config) if suite is not None: self.add_suite(suite) self._report_prefix = get_experiment_name() def add_comparison_table_step(self, attributes=None): revisions = self._HACK_revisions if revisions is None: # TODO: It's not clear to me what a "revision" in the # overall context of the code really is, e.g. when keeping # the translator and preprocessor method fixed and only # changing the search component. It's also not really # clear to me how the interface of the Compare... reports # works and how to use it more generally. Hence the # present hack. # Ideally, this method should look at the table columns we # have (defined by planners and planner configurations), # pair them up in a suitable way, either controlled by a # convenience parameter or a more general grouping method, # and then use this to define which pairs go together. raise NotImplementedError( "only supported when specifying revisions in __init__") if attributes is None: attributes = self.DEFAULT_TABLE_ATTRIBUTES report = CompareRevisionsReport(*revisions, attributes=attributes) self.add_report(report, outfile="%s-compare.html" % self._report_prefix) def add_scatter_plot_step(self, attributes=None): if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES revisions = self._HACK_revisions if revisions is None: # TODO: See add_comparison_table_step. raise NotImplementedError( "only supported when specifying revisions in __init__") if len(revisions) != 2: # TODO: Should generalize this, too, by offering a general # grouping function and then comparing any pair of # settings in the same group. 
raise NotImplementedError("need two revisions") scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plots(): configs = [conf[0] for conf in self.configs] for nick in configs: config_before = "%s-%s" % (revisions[0], nick) config_after = "%s-%s" % (revisions[1], nick) for attribute in attributes: name = "%s-%s-%s" % (self._report_prefix, attribute, nick) report = ScatterPlotReport( filter_config=[config_before, config_after], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots))
8551
37.008889
80
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue387/issue387.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward.suites import suite_optimal_with_ipc11
from downward.configs import default_configs_optimal

import common_setup


REVS = ["issue387-base", "issue387-v1"]

CONFIGS = default_configs_optimal()
# remove config that is disabled in this branch
del CONFIGS['astar_selmax_lmcut_lmcount']

TEST_RUN = True

if TEST_RUN:
    SUITE = "gripper:prob01.pddl"
    PRIORITY = None  # "None" means local experiment
else:
    SUITE = suite_optimal_with_ipc11()
    PRIORITY = 0  # number means maia experiment


# TODO: I'd like to specify "search_revisions" (which uses the same
# translator and preprocessor for everything) instead of "revisions"
# here, but I can't seem to make this work with the REVS argument for
# CompareRevisionsReport.

exp = common_setup.MyExperiment(
    grid_priority=PRIORITY,
    revisions=REVS,
    configs=CONFIGS,
    suite=SUITE
    )

exp.add_comparison_table_step()
exp.add_scatter_plot_step()

exp()
988
22
69
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/v1-sat-30min.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import itertools
import os
import subprocess

from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

EXPNAME = common_setup.get_experiment_name()

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-v1"]
CONFIG_DICT = {
    "eager-greedy-ff-silent": [
        "--evaluator",
        "h=ff()",
        "--search",
        "eager_greedy([h], preferred=[h], verbosity=silent)"],
    "eager-greedy-cea-silent": [
        "--evaluator",
        "h=cea()",
        "--search",
        "eager_greedy([h], preferred=[h], verbosity=silent)"],
    "lazy-greedy-add-silent": [
        "--evaluator",
        "h=add()",
        "--search",
        "lazy_greedy([h], preferred=[h], verbosity=silent)"],
    "lazy-greedy-cg-silent": [
        "--evaluator",
        "h=cg()",
        "--search",
        "lazy_greedy([h], preferred=[h], verbosity=silent)"],
    "lama-first-silent": [
        "--evaluator",
        "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
        "--evaluator", "hff=ff(transform=adapt_costs(one))",
        "--search",
        """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false, verbosity=silent)"""],
    "lama-first-typed-silent": [
        "--evaluator",
        "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
        "--evaluator", "hff=ff(transform=adapt_costs(one))",
        "--search",
        "lazy(alt([single(hff), single(hff, pref_only=true),"
        "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
        "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
        "preferred_successors_first=false, verbosity=silent)"],
    "eager-greedy-ff-normal": [
        "--evaluator",
        "h=ff()",
        "--search",
        "eager_greedy([h], preferred=[h], verbosity=normal)"],
    "eager-greedy-cea-normal": [
        "--evaluator",
        "h=cea()",
        "--search",
        "eager_greedy([h], preferred=[h], verbosity=normal)"],
    "lazy-greedy-add-normal": [
        "--evaluator",
        "h=add()",
        "--search",
        "lazy_greedy([h], preferred=[h], verbosity=normal)"],
    "lazy-greedy-cg-normal": [
        "--evaluator",
        "h=cg()",
        "--search",
        "lazy_greedy([h], preferred=[h], verbosity=normal)"],
    "lama-first-normal": [
        "--evaluator",
        "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
        "--evaluator", "hff=ff(transform=adapt_costs(one))",
        "--search",
        """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false, verbosity=normal)"""],
    "lama-first-typed-normal": [
        "--evaluator",
        "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
        "--evaluator", "hff=ff(transform=adapt_costs(one))",
        "--search",
        "lazy(alt([single(hff), single(hff, pref_only=true),"
        "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
        "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
        "preferred_successors_first=false, verbosity=normal)"],
}

CONFIGS = [
    IssueConfig(config_nick, config,
                driver_options=["--overall-time-limit", "30m"])
    for rev in REVISIONS
    for config_nick, config in CONFIG_DICT.items()
]

SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')

exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_parse_again_step()

log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]

exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()

sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
    "{}-eager-greedy-ff-silent".format(REVISIONS[0]),
    "{}-eager-greedy-cea-silent".format(REVISIONS[0]),
    "{}-lazy-greedy-add-silent".format(REVISIONS[0]),
    "{}-lazy-greedy-cg-silent".format(REVISIONS[0]),
    "{}-lama-first-silent".format(REVISIONS[0]),
    "{}-lama-first-typed-silent".format(REVISIONS[0]),
],name="silent")
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
    "{}-eager-greedy-ff-normal".format(REVISIONS[0]),
    "{}-eager-greedy-cea-normal".format(REVISIONS[0]),
    "{}-lazy-greedy-add-normal".format(REVISIONS[0]),
    "{}-lazy-greedy-cg-normal".format(REVISIONS[0]),
    "{}-lama-first-normal".format(REVISIONS[0]),
    "{}-lama-first-typed-normal".format(REVISIONS[0]),
],name="normal")

exp.run_steps()
5733
36.477124
103
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/base-opt-30min.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import itertools
import os
import subprocess

from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

EXPNAME = common_setup.get_experiment_name()

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-base"]
SEARCHES = [
    ("bjolp", [
        "--evaluator",
        "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
        "--search",
        "astar(lmc,lazy_evaluator=lmc)"]),
    ("blind", ["--search", "astar(blind())"]),
    ("cegar", ["--search", "astar(cegar())"]),
    ("divpot", ["--search", "astar(diverse_potentials())"]),
    ("ipdb", ["--search", "astar(ipdb())"]),
    ("lmcut", ["--search", "astar(lmcut())"]),
    ("mas", [
        "--search",
        "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
        " merge_strategy=merge_sccs(order_of_sccs=topological,"
        " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
        " label_reduction=exact(before_shrinking=true, before_merging=false),"
        " max_states=50000, threshold_before_merge=1))"]),
    ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]),
    ("h2", ["--search", "astar(hm(m=2))"]),
    ("hmax", ["--search", "astar(hmax())"]),
]
CONFIGS = [
    IssueConfig(search_nick, search,
                driver_options=["--overall-time-limit", "30m"])
    for rev in REVISIONS
    for search_nick, search in SEARCHES
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')

exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")

log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]

exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()

sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec)

exp.run_steps()
2858
33.035714
112
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport from sortedreport import SortedReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 
'termes-sat18-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "quality", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. 
:: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_sorted_report_step(self, sort_spec, name=None, **kwargs): """Add step that makes a sorted report. """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = SortedReport(sort_spec, **kwargs) name = name or "sorted" name = "-" + name outfile = os.path.join( self.eval_dir, get_experiment_name() + name + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish{}-report'.format(name), subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print("Make scatter plot for", name) algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
15419
36.518248
83
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/base-sat-30min.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import itertools
import os
import subprocess

from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

EXPNAME = common_setup.get_experiment_name()

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-base"]
CONFIG_DICT = {
    "eager_greedy_ff": [
        "--evaluator",
        "h=ff()",
        "--search",
        "eager_greedy([h], preferred=[h])"],
    "eager_greedy_cea": [
        "--evaluator",
        "h=cea()",
        "--search",
        "eager_greedy([h], preferred=[h])"],
    "lazy_greedy_add": [
        "--evaluator",
        "h=add()",
        "--search",
        "lazy_greedy([h], preferred=[h])"],
    "lazy_greedy_cg": [
        "--evaluator",
        "h=cg()",
        "--search",
        "lazy_greedy([h], preferred=[h])"],
    "lama-first": [
        "--evaluator",
        "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
        "--evaluator", "hff=ff(transform=adapt_costs(one))",
        "--search",
        """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""],
    "lama-first-typed": [
        "--evaluator",
        "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
        "--evaluator", "hff=ff(transform=adapt_costs(one))",
        "--search",
        "lazy(alt([single(hff), single(hff, pref_only=true),"
        "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000),"
        "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true,"
        "preferred_successors_first=false)"],
}

CONFIGS = [
    IssueConfig(config_nick, config,
                driver_options=["--overall-time-limit", "30m"])
    for rev in REVISIONS
    for config_nick, config in CONFIG_DICT.items()
]

SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')

exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")

log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]

exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()

sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec)

exp.run_steps()
3211
30.80198
103
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/v1-opt-30min.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import itertools
import os
import subprocess

from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

EXPNAME = common_setup.get_experiment_name()

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue744-v1"]
SEARCHES = [
    ("bjolp-silent", [
        "--evaluator",
        "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
        "--search",
        "astar(lmc,lazy_evaluator=lmc, verbosity=silent)"]),
    ("blind-silent", ["--search", "astar(blind(), verbosity=silent)"]),
    ("cegar-silent", ["--search", "astar(cegar(), verbosity=silent)"]),
    # ("divpot", ["--search", "astar(diverse_potentials(), verbosity=silent)"]),
    ("ipdb-silent", ["--search", "astar(ipdb(), verbosity=silent)"]),
    ("lmcut-silent", ["--search", "astar(lmcut(), verbosity=silent)"]),
    ("mas-silent", [
        "--search",
        "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
        " merge_strategy=merge_sccs(order_of_sccs=topological,"
        " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
        " label_reduction=exact(before_shrinking=true, before_merging=false),"
        " max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=silent)"]),
    # ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=silent)"]),
    ("h2-silent", ["--search", "astar(hm(m=2), verbosity=silent)"]),
    ("hmax-silent", ["--search", "astar(hmax(), verbosity=silent)"]),
    ("bjolp-normal", [
        "--evaluator",
        "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)",
        "--search",
        "astar(lmc,lazy_evaluator=lmc, verbosity=normal)"]),
    ("blind-normal", ["--search", "astar(blind(), verbosity=normal)"]),
    ("cegar-normal", ["--search", "astar(cegar(), verbosity=normal)"]),
    # ("divpot", ["--search", "astar(diverse_potentials(), verbosity=normal)"]),
    ("ipdb-normal", ["--search", "astar(ipdb(), verbosity=normal)"]),
    ("lmcut-normal", ["--search", "astar(lmcut(), verbosity=normal)"]),
    ("mas-normal", [
        "--search",
        "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),"
        " merge_strategy=merge_sccs(order_of_sccs=topological,"
        " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order])),"
        " label_reduction=exact(before_shrinking=true, before_merging=false),"
        " max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=normal)"]),
    # ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=normal)"]),
    ("h2-normal", ["--search", "astar(hm(m=2), verbosity=normal)"]),
    ("hmax-normal", ["--search", "astar(hmax(), verbosity=normal)"]),
]
CONFIGS = [
    IssueConfig(search_nick, search,
                driver_options=["--overall-time-limit", "30m"])
    for rev in REVISIONS
    for search_nick, search in SEARCHES
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)

exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser('custom-parser.py')

exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
exp.add_parse_again_step()

log_size = Attribute('log_size')
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size]

exp.add_absolute_report_step(attributes=attributes)
#exp.add_comparison_table_step()

sort_spec = [('log_size', 'desc')]
attributes = ['run_dir', 'log_size']
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
    "{}-bjolp-silent".format(REVISIONS[0]),
    "{}-blind-silent".format(REVISIONS[0]),
    "{}-cegar-silent".format(REVISIONS[0]),
    "{}-ipdb-silent".format(REVISIONS[0]),
    "{}-lmcut-silent".format(REVISIONS[0]),
    "{}-mas-silent".format(REVISIONS[0]),
    "{}-h2-silent".format(REVISIONS[0]),
    "{}-hmax-silent".format(REVISIONS[0]),
],name="silent")
exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[
    "{}-bjolp-normal".format(REVISIONS[0]),
    "{}-blind-normal".format(REVISIONS[0]),
    "{}-cegar-normal".format(REVISIONS[0]),
    "{}-ipdb-normal".format(REVISIONS[0]),
    "{}-lmcut-normal".format(REVISIONS[0]),
    "{}-mas-normal".format(REVISIONS[0]),
    "{}-h2-normal".format(REVISIONS[0]),
    "{}-hmax-normal".format(REVISIONS[0]),
],name="normal")

exp.run_steps()
5254
42.07377
132
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers

# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise "not supported"
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/sortedreport.py
# -*- coding: utf-8 -*-
#
# Downward Lab uses the Lab package to conduct experiments with the
# Fast Downward planning system.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from operator import itemgetter

import logging

from lab.reports import Table, DynamicDataModule
from downward.reports import PlanningReport


class SortedReport(PlanningReport):
    def __init__(self, sort_spec, **kwargs):
        PlanningReport.__init__(self, **kwargs)
        self._sort_spec = sort_spec

    def get_markup(self):
        """
        Return `txt2tags <http://txt2tags.org/>`_ markup for the report.
        """
        table = Table()
        row_sort_module = RowSortModule(self._sort_spec)
        table.dynamic_data_modules.append(row_sort_module)
        for run_id, run in self.props.items():
            row = {}
            for key, value in run.items():
                if key not in self.attributes:
                    continue
                if isinstance(value, (list, tuple)):
                    key = '-'.join([str(item) for item in value])
                row[key] = value
            table.add_row(run_id, row)
        return str(table)


class RowSortModule(DynamicDataModule):
    def __init__(self, sort_spec):
        self._sort_spec = sort_spec

    def modify_printable_row_order(self, table, row_order):
        col_names = [None] + table.col_names
        entries = []
        for row_name in row_order:
            if row_name == 'column names (never printed)':
                continue
            entry = [row_name] + table.get_row(row_name)
            entries.append(tuple(entry))

        for attribute, desc in reversed(self._sort_spec):
            index = col_names.index(attribute)
            reverse = desc == 'desc'
            entries.sort(key=itemgetter(index), reverse=reverse)

        new_row_order = ['column names (never printed)'] + [i[0] for i in entries]
        return new_row_order
2524
32.223684
82
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue744/custom-parser.py
#! /usr/bin/env python

from lab.parser import Parser


def compute_log_size(content, props):
    props["log_size"] = len(content)


def main():
    parser = Parser()
    parser.add_function(compute_log_size)
    parser.parse()


main()
234
15.785714
41
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue768/v1-opt.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue768-base", "issue768-v1"]
CONFIGS = [
    IssueConfig('ipdb', ['--search', 'astar(ipdb(max_time=900))']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

#exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attribute in ["memory", "total_time"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
        )

exp.run_steps()
1443
29.083333
93
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue768/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14171
35.715026
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue768/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers

# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise "not supported"
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/v2-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v2-base", "issue688-v2"]
BUILDS = ["release32"]
SEARCHES = [
    ("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
    ("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]),
    ("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        search,
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
] + [
    IssueConfig(
        "lama-first-{build}".format(**locals()),
        [],
        build_options=[build],
        driver_options=["--build", build, "--alias", "lama-first"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp.run_steps()
1,525
27.259259
88
py
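For readers unfamiliar with lab, the CONFIGS comprehension in v2-sat.py above simply builds one algorithm per (search, build) pair. The sketch below is plain Python with dictionaries standing in for IssueConfig objects, just to make the generated names visible.

# Plain-Python sketch of the names produced by the CONFIGS comprehension in
# v2-sat.py above; the dictionaries stand in for IssueConfig objects.
SEARCHES = [
    ("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
]
BUILDS = ["release32"]

expanded = [
    {"nick": "{nick}-{build}".format(nick=nick, build=build),
     "component_options": search,
     "build_options": [build],
     "driver_options": ["--build", build]}
    for nick, search in SEARCHES
    for build in BUILDS
]

assert expanded[0]["nick"] == "eager_ff-release32"
assert expanded[0]["driver_options"] == ["--build", "release32"]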
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/v2-opt.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v2-base", "issue688-v2"]
BUILDS = ["release32"]
SEARCHES = [
    ("blind", ["--search", "astar(blind())"]),
    ("ipdb", ["--search", "astar(ipdb())"]),
    ("divpot", ["--search", "astar(diverse_potentials())"]),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        search,
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_report(RelativeScatterPlotReport(
    attributes=["search_time"],
    filter_algorithm=["issue688-v2-base-blind-release32", "issue688-v2-blind-release32"],
    get_category=lambda run1, run2: run1.get("domain"),
    ), outfile="{}-blind-search_time.png".format(exp.name))

exp.run_steps()
1,566
27.490909
89
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/v1-opt.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v1-base", "issue688-v1"]
BUILDS = ["release32"]
SEARCHES = [
    ("blind", ["--search", "astar(blind())"]),
    ("ipdb", ["--search", "astar(ipdb())"]),
    ("divpot", ["--search", "astar(diverse_potentials())"]),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        search,
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp.run_steps()
1,229
24.625
62
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/v1-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v1-base", "issue688-v1"]
BUILDS = ["release32"]
SEARCHES = [
    ("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
    ("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]),
    ("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        search,
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
] + [
    IssueConfig(
        "lama-first-{build}".format(**locals()),
        [],
        build_options=[build],
        driver_options=["--build", build, "--alias", "lama-first"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp.run_steps()
1,519
27.148148
88
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,171
35.715026
79
py
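The common_setup module above wraps FastDownwardExperiment with the IssueConfig and IssueExperiment helpers used by all the issue688 scripts. The following hypothetical minimal script (not one of the repository files) shows the smallest complete use of that module, including add_scatter_plot_step, which the issue688 scripts themselves never call; it assumes lab, a Fast Downward checkout, and the DOWNWARD_BENCHMARKS environment variable.

#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Hypothetical example built on the common_setup.py module above.
import os

import common_setup
from common_setup import IssueConfig, IssueExperiment

BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]

exp = IssueExperiment(
    revisions=["issue688-v2-base", "issue688-v2"],
    configs=[IssueConfig("blind", ["--search", "astar(blind())"])],
)
exp.add_suite(BENCHMARKS_DIR, ["gripper:prob01.pddl"])

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["expansions"])

exp.run_steps()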
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/v3-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v3-base", "issue688-v3"]
BUILDS = ["release32"]
SEARCHES = [
    ("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]),
    ("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]),
    ("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        search,
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
] + [
    IssueConfig(
        "lama-first-{build}".format(**locals()),
        [],
        build_options=[build],
        driver_options=["--build", build, "--alias", "lama-first"])
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()

exp.run_steps()
1,525
27.259259
88
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise ValueError("not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue688/v3-opt.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue688-v3-base", "issue688-v3"]
BUILDS = ["release32"]
SEARCHES = [
    ("blind", ["--search", "astar(blind())"]),
    ("ipdb", ["--search", "astar(ipdb())"]),
    ("divpot", ["--search", "astar(diverse_potentials())"]),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        search,
        build_options=[build],
        driver_options=["--build", build])
    for nick, search in SEARCHES
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_report(RelativeScatterPlotReport(
    attributes=["search_time"],
    filter_algorithm=["issue688-v3-base-blind-release32", "issue688-v3-blind-release32"],
    get_category=lambda run1, run2: run1.get("domain"),
    ), outfile="{}-blind-search_time.png".format(exp.name))

exp.run_steps()
1,566
27.490909
89
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue546/v1-sat-fdss.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup


REVS = ["issue546-base", "issue546-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()

CONFIGS = {
    "seq_sat_fdss_1": ["--alias", "seq-sat-fdss-1"],
    "seq_sat_fdss_2": ["--alias", "seq-sat-fdss-2"],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_comparison_table_step(
    attributes=common_setup.IssueExperiment.PORTFOLIO_ATTRIBUTES)

exp()
568
19.321429
65
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue546/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in self.get_supported_attributes( config_nick, attributes): make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,856
34.913408
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue546/v1-opt-fdss.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup


REVS = ["issue546-base", "issue546-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    "seq_opt_fdss_1": ["--alias", "seq-opt-fdss-1"],
    "seq_opt_fdss_2": ["--alias", "seq-opt-fdss-2"],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_comparison_table_step(
    attributes=common_setup.IssueExperiment.PORTFOLIO_ATTRIBUTES)

exp()
564
19.178571
65
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue546/v1-limits.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup


REVS = ["issue546-v1"]
LIMITS = {"search_time": 300, "search_memory": 1024}
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    "blind-fd-limits": ["--search", "astar(blind())"],
    "blind-lab-limits": ["--search", "astar(blind())"],
}


class FastDownwardLimits(common_setup.IssueExperiment):
    def _make_search_runs(self):
        common_setup.IssueExperiment._make_search_runs(self)
        for run in self.runs:
            if "fd-limits" in run.properties["config_nick"]:
                # Move limits to fast-downward.py
                search_args, search_kwargs = run.commands["search"]
                time_limit = search_kwargs["time_limit"]
                mem_limit = search_kwargs["mem_limit"]
                del search_kwargs["time_limit"]
                del search_kwargs["mem_limit"]
                search_args.insert(1, "--search-timeout")
                search_args.insert(2, str(time_limit))
                search_args.insert(3, "--search-memory")
                search_args.insert(4, str(mem_limit))


exp = FastDownwardLimits(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
)
exp.add_absolute_report_step()

exp()
1,289
29
67
py
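The only non-standard piece in v1-limits.py above is _make_search_runs, which moves the resource limits from lab's keyword arguments into the planner command line for the "fd-limits" runs. Below is a standalone sketch of that list surgery on a made-up command; the real run objects store their commands differently.

# Standalone sketch (made-up command) of the argument rewriting performed by
# FastDownwardLimits._make_search_runs above.
search_args = ["fast-downward.py", "--search", "astar(blind())"]
search_kwargs = {"time_limit": 300, "mem_limit": 1024}

time_limit = search_kwargs["time_limit"]
mem_limit = search_kwargs["mem_limit"]
del search_kwargs["time_limit"]
del search_kwargs["mem_limit"]
search_args.insert(1, "--search-timeout")
search_args.insert(2, str(time_limit))
search_args.insert(3, "--search-memory")
search_args.insert(4, str(mem_limit))

assert search_args == [
    "fast-downward.py",
    "--search-timeout", "300", "--search-memory", "1024",
    "--search", "astar(blind())"]
assert search_kwargs == {}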
DAAISy
DAAISy-main/dependencies/FD/experiments/issue849/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, geometric_mean from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue849-base", "issue849-v1"] BUILDS = ["release32"] CONFIG_NICKS = [ ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), ('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), ('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), ] CONFIGS = [ IssueConfig( config_nick, config, build_options=[build], driver_options=["--build", build]) for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser('ms-parser.py') exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ 
perfect_heuristic, ms_construction_time, ms_atomic_construction_time, ms_abstraction_constructed, ms_atomic_fts_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step(attributes=attributes) exp.run_steps()
4,069
41.395833
451
py
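Several of the runtime attributes defined in v1.py above (ms_construction_time, ms_atomic_construction_time) are aggregated with lab's geometric_mean rather than an arithmetic mean. The toy illustration below uses its own helper and invented numbers purely to show what that aggregation computes; lab ships its own implementation.

# Toy illustration of geometric-mean aggregation; the helper and the numbers
# are invented, only the idea matches the attributes registered above.
def geometric_mean(values):
    product = 1.0
    for value in values:
        product *= value
    return product ** (1.0 / len(values))

construction_times = [0.5, 2.0, 8.0]   # invented per-task times in seconds
assert abs(geometric_mean(construction_times) - 2.0) < 1e-9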
DAAISy
DAAISy-main/dependencies/FD/experiments/issue849/ms-parser.py
#! /usr/bin/env python

from lab.parser import Parser

parser = Parser()
parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)
# TODO: replace above by below in future experiments
parser.add_pattern('ms_construction_time', 'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float)
parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float)
parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int)

def check_ms_constructed(content, props):
    ms_construction_time = props.get('ms_construction_time')
    abstraction_constructed = False
    if ms_construction_time is not None:
        abstraction_constructed = True
    props['ms_abstraction_constructed'] = abstraction_constructed

parser.add_function(check_ms_constructed)

def check_atomic_fts_constructed(content, props):
    ms_atomic_construction_time = props.get('ms_atomic_construction_time')
    ms_atomic_fts_constructed = False
    if ms_atomic_construction_time is not None:
        ms_atomic_fts_constructed = True
    props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed

parser.add_function(check_atomic_fts_constructed)

def check_planner_exit_reason(content, props):
    ms_abstraction_constructed = props.get('ms_abstraction_constructed')
    error = props.get('error')
    if error != 'success' and error != 'timeout' and error != 'out-of-memory':
        print 'error: %s' % error
        return

    # Check whether merge-and-shrink computation or search ran out of
    # time or memory.
    ms_out_of_time = False
    ms_out_of_memory = False
    search_out_of_time = False
    search_out_of_memory = False
    if ms_abstraction_constructed == False:
        if error == 'timeout':
            ms_out_of_time = True
        elif error == 'out-of-memory':
            ms_out_of_memory = True
    elif ms_abstraction_constructed == True:
        if error == 'timeout':
            search_out_of_time = True
        elif error == 'out-of-memory':
            search_out_of_memory = True
    props['ms_out_of_time'] = ms_out_of_time
    props['ms_out_of_memory'] = ms_out_of_memory
    props['search_out_of_time'] = search_out_of_time
    props['search_out_of_memory'] = search_out_of_memory

parser.add_function(check_planner_exit_reason)

def check_perfect_heuristic(content, props):
    plan_length = props.get('plan_length')
    expansions = props.get('expansions')
    if plan_length != None:
        perfect_heuristic = False
        if plan_length + 1 == expansions:
            perfect_heuristic = True
        props['perfect_heuristic'] = perfect_heuristic

parser.add_function(check_perfect_heuristic)

parser.parse()
2,958
39.534247
139
py
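Each add_pattern call in ms-parser.py above is a plain regular expression applied to the planner log. The self-contained sketch below shows what one of those patterns extracts; the regular expression is the one used above, while the log line itself is invented.

# Self-contained sketch of the kind of match ms-parser.py's patterns perform.
import re

pattern = re.compile(r'Final transition system size: (\d+)')
sample_log = "Final transition system size: 4782\n"   # invented log line

match = pattern.search(sample_log)
assert match is not None
assert int(match.group(1)) == 4782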
DAAISy
DAAISy-main/dependencies/FD/experiments/issue849/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,786
36.435443
82
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue849/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise ValueError("not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue414/opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from downward import suites import common_setup DIR = os.path.dirname(os.path.abspath(__file__)) REPO = os.path.dirname(os.path.dirname(DIR)) REVS = ["issue414-base", "issue414"] LIMITS = {"search_time": 1800} SUITE = suites.suite_optimal_with_ipc11() # The aliases are adjusted for the respective driver scripts by lab. CONFIGS = { "ipdb": ["--search", "astar(ipdb())"], } for alias in ["seq-opt-bjolp", "seq-opt-fdss-1", "seq-opt-fdss-2", "seq-opt-lmcut", "seq-opt-merge-and-shrink"]: CONFIGS[alias] = ["--alias", alias] exp = common_setup.IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
770
19.837838
68
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue414/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. 
It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,677
35.431034
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue414/sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from downward import suites import common_setup DIR = os.path.dirname(os.path.abspath(__file__)) REPO = os.path.dirname(os.path.dirname(DIR)) REVS = ["issue414-base", "issue414"] LIMITS = {"search_time": 1800} SUITE = suites.suite_satisficing_with_ipc11() # The aliases are adjusted for the respective driver scripts by lab. CONFIGS = { "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], "seq_sat_fdss_1": ["ipc", "seq-sat-fdss-1"], "seq_sat_fdss_2": ["--alias", "seq-sat-fdss-2"], "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], } exp = common_setup.IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
830
20.307692
68
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue860/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue860-base", "issue860-v2"] BUILDS = ["release"] CONFIG_NICKS = [ ("astar-blind", ["--search", "astar(blind())"]), ("astar-lmcut", ["--search", "astar(lmcut())"]), # inconsistent heuristic to test re-opening ("bjolp", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), # test lazy evaluator ] CONFIGS = [ IssueConfig( config_nick, config, build_options=[build], driver_options=["--build", build]) for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_comparison_table_step() exp.run_steps()
1,874
26.985075
96
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue860/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue860-base", "issue860-v1"] BUILDS = ["release"] CONFIG_NICKS = [ ("astar-blind", ["--search", "astar(blind)"]), ] CONFIGS = [ IssueConfig( config_nick, config, build_options=[build], driver_options=["--build", build]) for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_absolute_report_step() exp.add_comparison_table_step() exp.run_steps()
1,630
24.888889
68
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue860/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,786
36.435443
82
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue860/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the relation to the value
    of algorithm 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            # Raising a plain string is invalid; raise a real exception.
            raise ValueError("not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue481/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Wrapper for FastDownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions, configs, suite, grid_priority=None, path=None, test_suite=None, email=None, **kwargs): """Create a DownwardExperiment with some convenience features. If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(os.path.join(repo, "benchmarks"), suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append(( "{rev1}-{config_nick}".format(**locals()), "{rev2}-{config_nick}".format(**locals()), "Diff ({config_nick})".format(**locals()))) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "{name}-{rev1}-{rev2}-compare.html".format( name=self.name, **locals())) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
11,842
33.628655
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue481/v1-lama.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment REVS = ["issue481-base", "issue481-v1"] SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = [ IssueConfig("lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ] exp = IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, email="[email protected]" ) exp.add_comparison_table_step() exp()
472
17.192308
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue481/v1-opt-test.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment REVS = ["issue481-base", "issue481-v1"] SUITE = suites.suite_optimal_with_ipc11() CONFIGS = [ # Greedy (tests single and alternating open lists) IssueConfig("astar_lmcut", [ "--search", "astar(lmcut())" ]), ] exp = IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, email="[email protected]" ) exp.add_comparison_table_step() exp()
539
17
54
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue481/v1-sat-test.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment REVS = ["issue481-base", "issue481-v1"] SUITE = suites.suite_satisficing_with_ipc11() CONFIGS = [ # Greedy (tests single and alternating open lists) IssueConfig("eager_greedy_ff", [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)" ]), IssueConfig("lazy_greedy_ff", [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)" ]), # Epsilon Greedy IssueConfig("lazy_epsilon_greedy_ff", [ "--heuristic", "h=ff()", "--search", "lazy(epsilon_greedy(h))" ]), # Pareto IssueConfig("lazy_pareto_ff_cea", [ "--heuristic", "h1=ff()", "--heuristic", "h2=cea()", "--search", "lazy(pareto([h1, h2]))" ]), # Single Buckets IssueConfig("lazy_single_buckets_ff", [ "--heuristic", "h=ff()", "--search", "lazy(single_buckets(h))" ]), # Type based (from issue455) IssueConfig("ff-type-const", [ "--heuristic", "hff=ff(cost_type=one)", "--search", "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])])," "preferred=[hff],cost_type=one)" ]), IssueConfig("lama-first", [ "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", "--search", "lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true)])," "preferred=[hff,hlm],cost_type=one)" ]), IssueConfig("lama-first-types-ff-g", [ "--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", "--search", "lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true), type_based([hff, g()])])," "preferred=[hff,hlm],cost_type=one)" ]), ] exp = IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, email="[email protected]" ) # Absolute report commented out because a comparison table is more useful for this issue. # (It's still in this file because someone might want to use it as a basis.) # Scatter plots commented out for now because I have no usable matplotlib available. # exp.add_absolute_report_step() exp.add_comparison_table_step() # exp.add_scatter_plot_step() exp()
2,665
28.955056
132
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue903/parser.py
#! /usr/bin/env python import logging import re from lab.parser import Parser class CommonParser(Parser): def add_difference(self, diff, val1, val2): def diff_func(content, props): if props.get(val1) is None or props.get(val2) is None: diff_val = None else: diff_val = props.get(val1) - props.get(val2) props[diff] = diff_val self.add_function(diff_func) def _get_flags(self, flags_string): flags = 0 for char in flags_string: flags |= getattr(re, char) return flags def add_repeated_pattern( self, name, regex, file="run.log", required=False, type=int, flags=""): def find_all_occurences(content, props): matches = re.findall(regex, content, flags=self._get_flags(flags)) if required and not matches: logging.error("Pattern {0} not found in file {1}".format(regex, file)) props[name] = [type(m) for m in matches] self.add_function(find_all_occurences, file=file) def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): def search_from_bottom(content, props): reversed_content = "\n".join(reversed(content.splitlines())) match = re.search(regex, reversed_content, flags=self._get_flags(flags)) if required and not match: logging.error("Pattern {0} not found in file {1}".format(regex, file)) if match: props[name] = type(match.group(1)) self.add_function(search_from_bottom, file=file) def no_search(content, props): if "search_start_time" not in props: error = props.get("error") if error is not None and error != "incomplete-search-found-no-plan": props["error"] = "no-search-due-to-" + error REFINEMENT_ATTRIBUTES = [ ("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"), ("time_for_finding_flaws", r"Time for finding flaws: (.+)s"), ("time_for_splitting_states", r"Time for splitting states: (.+)s"), ] def compute_total_times(content, props): for attribute, pattern in REFINEMENT_ATTRIBUTES: props["total_" + attribute] = sum(props[attribute]) def add_time_analysis(content, props): init_time = props.get("init_time") if not init_time: return parts = [] parts.append("{init_time:.2f}:".format(**props)) for attribute, pattern in REFINEMENT_ATTRIBUTES: time = props["total_" + attribute] relative_time = time / init_time print time, type(time) parts.append("{:.2f} ({:.2f})".format(time, relative_time)) props["time_analysis"] = " ".join(parts) def main(): parser = CommonParser() parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float) parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int) parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float) parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int) for attribute, pattern in REFINEMENT_ATTRIBUTES: parser.add_repeated_pattern(attribute, pattern, type=float, required=False) parser.add_function(no_search) parser.add_function(compute_total_times) parser.add_function(add_time_analysis) parser.parse() if __name__ == "__main__": main()
3,743
34.657143
109
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue903/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue903-base", "issue903-v1"] DRIVER_OPTIONS = ["--overall-time-limit", "5m"] CONFIGS = [ IssueConfig( "cegar-original-1M", ["--search", "astar(cegar(subtasks=[original()], max_transitions=1M, max_time=infinity))"], driver_options=DRIVER_OPTIONS), IssueConfig( "cegar-lm-goals-1M", ["--search", "astar(cegar(subtasks=[landmarks(), goals()], max_transitions=1M, max_time=infinity))"], driver_options=DRIVER_OPTIONS), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser(os.path.join(DIR, "parser.py")) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') REFINEMENT_ATTRIBUTES = [ "time_for_finding_traces", "time_for_finding_flaws", "time_for_splitting_states", ] attributes = ( IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["search_start_memory", "init_time", "time_analysis"] + ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) #exp.add_absolute_report_step(attributes=attributes) exp.add_comparison_table_step(attributes=attributes) exp.add_scatter_plot_step( relative=True, attributes=["init_time", "search_time", "total_time"]) exp.run_steps()
2,222
29.875
109
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue903/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = get_algo_nick(rev1, config_nick) algo2 = get_algo_nick(rev2, config_nick) report = report_class( filter_algorithm=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"]) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,743
36.42132
82
py
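The wrapper above is meant to be driven from a small per-issue experiment script. The sketch below shows one plausible way to use it, modeled on the issue-specific scripts later in this file; the revision tags, configuration and suite are invented, and the grid-environment setup such scripts normally add is omitted.

#! /usr/bin/env python
# Illustrative sketch only: revision tags, config and suite below are invented.
import os

from common_setup import IssueConfig, IssueExperiment

BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue903-base", "issue903-v1"]
CONFIGS = [IssueConfig("blind", ["--search", "astar(blind())"])]

exp = IssueExperiment(revisions=REVISIONS, configs=CONFIGS)
exp.add_suite(BENCHMARKS_DIR, ["gripper:prob01.pddl"])

exp.add_absolute_report_step()            # one AbsoluteReport over all runs
exp.add_comparison_table_step()           # pairwise revision comparison tables
exp.add_scatter_plot_step(relative=True)  # relative scatter plots per attribute

exp.run_steps()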
DAAISy
DAAISy-main/dependencies/FD/experiments/issue903/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if not val1 or not val2: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,867
35.490566
78
py
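To make the transformation in _fill_categories() above concrete, here is a toy computation with invented attribute values for two algorithms; only the arithmetic mirrors the report code.

# Invented values: (value of algorithm 1, value of algorithm 2) per task.
pairs = [(10.0, 5.0), (10.0, 20.0), (100.0, 100.0)]
# x is the value of algorithm 1, y the ratio of algorithm 2 over algorithm 1.
points = [(v1, v2 / v1) for v1, v2 in pairs]
# -> [(10.0, 0.5), (10.0, 2.0), (100.0, 1.0)]

# The y-limits start at (bottom=2, top=0.5), are widened by the data and then
# re-centered around 1, so a 2x slowdown and a 2x speedup look symmetric on the
# log-scaled y-axis.
ylim_bottom, ylim_top = 2, 0.5
for _, y in points:
    ylim_top = max(ylim_top, y)
    ylim_bottom = min(ylim_bottom, y)
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))
# -> ylim_bottom == 0.5, ylim_top == 2.0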
DAAISy
DAAISy-main/dependencies/FD/experiments/issue666/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os import suites from lab.reports import Attribute, gm from common_setup import IssueConfig, IssueExperiment try: from relativescatter import RelativeScatterPlotReport matplotlib = True except ImportError: print 'matplotlib not availabe, scatter plots not available' matplotlib = False def main(revisions=None): benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') suite=suites.suite_optimal_strips() configs = { IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), } exp = IssueExperiment( benchmarks_dir=benchmarks_dir, suite=suite, revisions=revisions, configs=configs, test_suite=['depot:p01.pddl'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes 
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step() #if matplotlib: #for attribute in ["memory", "total_time"]: #for config in configs: #exp.add_report( #RelativeScatterPlotReport( #attributes=[attribute], #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], #get_category=lambda run1, run2: run1.get("domain"), #), #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) #) exp() main(revisions=['issue666-base', 'issue666-v1'])
5,710
61.758242
355
py
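One subtle point in main() above: attributes is bound to the class-level DEFAULT_TABLE_ATTRIBUTES list and extended in place, and add_comparison_table_step() later picks up that same, mutated list as its default. That is why the extra merge-and-shrink attributes appear in the comparison tables even though the list is never passed on explicitly. A more explicit variant, sketched below under the assumption that exp and extra_attributes are the objects built in main(), would pass the combined list directly.

# Sketch only; exp and extra_attributes refer to the objects built in main() above.
attributes = list(IssueExperiment.DEFAULT_TABLE_ATTRIBUTES) + extra_attributes
exp.add_comparison_table_step(attributes=attributes)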
DAAISy
DAAISy-main/dependencies/FD/experiments/issue666/suites.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import argparse import textwrap HELP = "Convert suite name to list of domains or tasks." def suite_alternative_formulations(): return ['airport-adl', 'no-mprime', 'no-mystery'] def suite_ipc98_to_ipc04_adl(): return [ 'assembly', 'miconic-fulladl', 'miconic-simpleadl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'schedule', ] def suite_ipc98_to_ipc04_strips(): return [ 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', 'satellite', 'zenotravel', ] def suite_ipc98_to_ipc04(): # All IPC1-4 domains, including the trivial Movie. return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) def suite_ipc06_adl(): return [ 'openstacks', 'pathways', 'trucks', ] def suite_ipc06_strips_compilations(): return [ 'openstacks-strips', 'pathways-noneg', 'trucks-strips', ] def suite_ipc06_strips(): return [ 'pipesworld-tankage', 'rovers', 'storage', 'tpp', ] def suite_ipc06(): return sorted(suite_ipc06_adl() + suite_ipc06_strips()) def suite_ipc08_common_strips(): return [ 'parcprinter-08-strips', 'pegsol-08-strips', 'scanalyzer-08-strips', ] def suite_ipc08_opt_adl(): return ['openstacks-opt08-adl'] def suite_ipc08_opt_strips(): return sorted(suite_ipc08_common_strips() + [ 'elevators-opt08-strips', 'openstacks-opt08-strips', 'sokoban-opt08-strips', 'transport-opt08-strips', 'woodworking-opt08-strips', ]) def suite_ipc08_opt(): return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) def suite_ipc08_sat_adl(): return ['openstacks-sat08-adl'] def suite_ipc08_sat_strips(): return sorted(suite_ipc08_common_strips() + [ # Note: cyber-security is missing. 
'elevators-sat08-strips', 'openstacks-sat08-strips', 'sokoban-sat08-strips', 'transport-sat08-strips', 'woodworking-sat08-strips', ]) def suite_ipc08_sat(): return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) def suite_ipc08(): return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) def suite_ipc11_opt(): return [ 'barman-opt11-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'nomystery-opt11-strips', 'openstacks-opt11-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'pegsol-opt11-strips', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'tidybot-opt11-strips', 'transport-opt11-strips', 'visitall-opt11-strips', 'woodworking-opt11-strips', ] def suite_ipc11_sat(): return [ 'barman-sat11-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'nomystery-sat11-strips', 'openstacks-sat11-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'pegsol-sat11-strips', 'scanalyzer-sat11-strips', 'sokoban-sat11-strips', 'tidybot-sat11-strips', 'transport-sat11-strips', 'visitall-sat11-strips', 'woodworking-sat11-strips', ] def suite_ipc11(): return sorted(suite_ipc11_opt() + suite_ipc11_sat()) def suite_ipc14_agl_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_agl_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-agl14-strips', 'openstacks-agl14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_agl(): return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) def suite_ipc14_mco_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_mco_strips(): return [ 'barman-mco14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-mco14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_mco(): return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) def suite_ipc14_opt_adl(): return [ 'cavediving-14-adl', 'citycar-opt14-adl', 'maintenance-opt14-adl', ] def suite_ipc14_opt_strips(): return [ 'barman-opt14-strips', 'childsnack-opt14-strips', 'floortile-opt14-strips', 'ged-opt14-strips', 'hiking-opt14-strips', 'openstacks-opt14-strips', 'parking-opt14-strips', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips', 'visitall-opt14-strips', ] def suite_ipc14_opt(): return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) def suite_ipc14_sat_adl(): return [ 'cavediving-14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_sat_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_sat(): return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) def suite_ipc14(): return sorted(set( suite_ipc14_agl() + suite_ipc14_mco() + suite_ipc14_opt() + suite_ipc14_sat())) def suite_unsolvable(): return sorted( ['mystery:prob%02d.pddl' % index for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) def suite_optimal_adl(): return sorted( suite_ipc98_to_ipc04_adl() + 
suite_ipc06_adl() + suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) def suite_optimal_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + suite_ipc11_opt() + suite_ipc14_opt_strips()) def suite_optimal(): return sorted(suite_optimal_adl() + suite_optimal_strips()) def suite_satisficing_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) def suite_satisficing_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + suite_ipc11_sat() + suite_ipc14_sat_strips()) def suite_satisficing(): return sorted(suite_satisficing_adl() + suite_satisficing_strips()) def suite_all(): return sorted( suite_ipc98_to_ipc04() + suite_ipc06() + suite_ipc06_strips_compilations() + suite_ipc08() + suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("suite", help="suite name") return parser.parse_args() def main(): prefix = "suite_" suite_names = [ name[len(prefix):] for name in sorted(globals().keys()) if name.startswith(prefix)] parser = argparse.ArgumentParser(description=HELP) parser.add_argument("suite", choices=suite_names, help="suite name") parser.add_argument( "--width", default=72, type=int, help="output line width (default: %(default)s). Use 1 for single " "column.") args = parser.parse_args() suite_func = globals()[prefix + args.suite] print(textwrap.fill( str(suite_func()), width=args.width, break_long_words=False, break_on_hyphens=False)) if __name__ == "__main__": main()
8,551
23.364672
77
py
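The module above doubles as a command-line tool; a plausible invocation and the equivalent programmatic use look like this (output abbreviated, shell prompt illustrative).

# From the shell (line width defaults to 72 characters):
#   $ ./suites.py optimal_strips
#   ['airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', ...]
#
# From another script:
import suites

domains = suites.suite_optimal_strips()
assert 'gripper' in domains and 'blocks' in domains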
DAAISy
DAAISy-main/dependencies/FD/experiments/issue666/ms-parser.py
#! /usr/bin/env python from lab.parser import Parser parser = Parser() parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) def check_ms_constructed(content, props): ms_construction_time = props.get('ms_construction_time') abstraction_constructed = False if ms_construction_time is not None: abstraction_constructed = True props['ms_abstraction_constructed'] = abstraction_constructed parser.add_function(check_ms_constructed) def check_planner_exit_reason(content, props): ms_abstraction_constructed = props.get('ms_abstraction_constructed') error = props.get('error') if error != 'none' and error != 'timeout' and error != 'out-of-memory': print 'error: %s' % error return # Check whether merge-and-shrink computation or search ran out of # time or memory. ms_out_of_time = False ms_out_of_memory = False search_out_of_time = False search_out_of_memory = False if ms_abstraction_constructed == False: if error == 'timeout': ms_out_of_time = True elif error == 'out-of-memory': ms_out_of_memory = True elif ms_abstraction_constructed == True: if error == 'timeout': search_out_of_time = True elif error == 'out-of-memory': search_out_of_memory = True props['ms_out_of_time'] = ms_out_of_time props['ms_out_of_memory'] = ms_out_of_memory props['search_out_of_time'] = search_out_of_time props['search_out_of_memory'] = search_out_of_memory parser.add_function(check_planner_exit_reason) def check_perfect_heuristic(content, props): plan_length = props.get('plan_length') expansions = props.get('expansions') if plan_length != None: perfect_heuristic = False if plan_length + 1 == expansions: perfect_heuristic = True props['perfect_heuristic'] = perfect_heuristic parser.add_function(check_perfect_heuristic) def check_proved_unsolvability(content, props): proved_unsolvability = False if props['coverage'] == 0: for line in content.splitlines(): if line == 'Completely explored state space -- no solution!': proved_unsolvability = True break props['proved_unsolvability'] = proved_unsolvability parser.add_function(check_proved_unsolvability) parser.parse()
2,784
37.150685
135
py
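The parser above relies on lab's regex-based patterns; the stand-alone snippet below, using only the re module and an invented log line, shows what one of those patterns extracts.

import re

# Invented log line in the format that the ms_construction_time pattern expects.
log_line = "Done initializing merge-and-shrink heuristic [12.34s]"
match = re.search(r"Done initializing merge-and-shrink heuristic \[(.+)s\]", log_line)
ms_construction_time = float(match.group(1))    # 12.34
# check_ms_constructed() then sets ms_abstraction_constructed to True whenever
# ms_construction_time was found, and check_planner_exit_reason() uses that flag
# to decide whether a timeout or memory-out happened during the merge-and-shrink
# construction or during the search.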
DAAISy
DAAISy-main/dependencies/FD/experiments/issue666/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=None, **kwargs): """ If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(benchmarks_dir, suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,496
33.907821
83
py
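The pairing logic inside add_comparison_table_step() above is easy to miss; the small stand-alone sketch below (with the revision tags and two of the config nicks used in the v1.py script earlier in this file) shows the tuples that are handed to CompareConfigsReport.

import itertools

revisions = ["issue666-base", "issue666-v1"]
config_nicks = ["rl-b50k", "dfp-b50k"]
for rev1, rev2 in itertools.combinations(revisions, 2):
    compared_configs = [
        ("%s-%s" % (rev1, nick), "%s-%s" % (rev2, nick), "Diff (%s)" % nick)
        for nick in config_nicks]
    # -> [('issue666-base-rl-b50k', 'issue666-v1-rl-b50k', 'Diff (rl-b50k)'),
    #     ('issue666-base-dfp-b50k', 'issue666-v1-dfp-b50k', 'Diff (dfp-b50k)')]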
DAAISy
DAAISy-main/dependencies/FD/experiments/issue666/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter(axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute in two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
35.654206
84
py
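The tick placement in _plot() above is rather terse; here is the same computation spelled out with an invented upper y-limit.

# Five log-spaced ticks above and below 1; ylim_top = 32 is an invented example.
ylim_top = 32.0
tick_step = ylim_top ** (1 / 5.0)               # about 2.0
yticks = [tick_step ** i for i in range(-5, 6)]
# roughly [0.03, 0.06, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32]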
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-sat-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] CONFIG_DICT = { "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy([h], preferred=[h])"], "eager_greedy_cea": [ "--heuristic", "h=cea()", "--search", "eager_greedy([h], preferred=[h])"], "lazy_greedy_add": [ "--heuristic", "h=add()", "--search", "lazy_greedy([h], preferred=[h])"], "lazy_greedy_cg": [ "--heuristic", "h=cg()", "--search", "lazy_greedy([h], preferred=[h])"], "lama-first": [ "--heuristic", """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))""", "--heuristic", "hff=ff_synergy(hlm)", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], "ff-typed": [ "--heuristic", "hff=ff()", "--search", "lazy(alt([single(hff), single(hff, pref_only=true)," " type_based([hff, g()])], boost=1000)," " preferred=[hff], cost_type=one)"], } CONFIGS = [ IssueConfig( "-".join([config_nick, build]), config, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "30m"]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_DICT.items() ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, _ in CONFIG_DICT.items()] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) exp.run_steps()
3,347
30
78
py
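The cross-build comparison at the end of the script above assembles its algorithm names by string formatting. The sketch below spells out the names produced for two of the configuration nicks; the naming scheme follows the script, and the restriction to two nicks is only for brevity.

rev = "issue213-v7"
build1, build2 = "release32", "release64"
config_nicks = ["eager_greedy_ff", "lama-first"]
algorithm_pairs = [
    ("%s-%s-%s" % (rev, nick, build1),
     "%s-%s-%s" % (rev, nick, build2),
     "Diff (%s-%s)" % (nick, rev))
    for nick in config_nicks]
# -> ('issue213-v7-eager_greedy_ff-release32',
#     'issue213-v7-eager_greedy_ff-release64',
#     'Diff (eager_greedy_ff-issue213-v7)'), ...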
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v4-blind.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v1", "issue213-v3", "issue213-v4"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() attributes = [ "coverage", "error", "expansions_until_last_jump", "memory", "score_memory", "total_time", "score_total_time", "hash_set_load_factor", "hash_set_resizings"] # Compare revisions. # lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 # lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals())) # Compare builds. # lmcut-base-32 vs. lmcut-base-64 # lmcut-v1-32 vs. lmcut-v1-64 # lmcut-v3-32 vs. lmcut v3-64 for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) for attribute in ["total_time", "memory"]: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]), name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute)) exp.run_steps()
2,930
32.306818
93
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-sat-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["release32", "release64"] CONFIG_DICT = { "eager_greedy_ff": [ "--evaluator", "h=ff()", "--search", "eager_greedy([h], preferred=[h])"], "eager_greedy_cea": [ "--evaluator", "h=cea()", "--search", "eager_greedy([h], preferred=[h])"], "lazy_greedy_add": [ "--evaluator", "h=add()", "--search", "lazy_greedy([h], preferred=[h])"], "lazy_greedy_cg": [ "--evaluator", "h=cg()", "--search", "lazy_greedy([h], preferred=[h])"], "lama-first": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], "lama-first-typed": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", "lazy(alt([single(hff), single(hff, pref_only=true)," "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," "preferred_successors_first=false)"], } CONFIGS = [ IssueConfig( "-".join([config_nick, build]), config, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "30m"]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_DICT.items() ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick in CONFIG_DICT.keys()] outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), outfile=outfile) exp.add_step( 'publish-report', subprocess.call, ['publish', outfile]) exp.run_steps()
3,818
32.208696
103
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-opt-5min-debug.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["debug64"] SEARCHES = [ ("bjolp", [ "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), ("blind", ["--search", "astar(blind())"]), ("cegar", ["--search", "astar(cegar())"]), ("divpot", ["--search", "astar(diverse_potentials())"]), ("ipdb", ["--search", "astar(ipdb())"]), ("lmcut", ["--search", "astar(lmcut())"]), ("mas", [ "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," " merge_strategy=merge_sccs(order_of_sccs=topological," " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," " label_reduction=exact(before_shrinking=true, before_merging=false)," " max_states=50000, threshold_before_merge=1))"]), ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), ("h2", ["--search", "astar(hm(m=2))"]), ("hmax", ["--search", "astar(hmax())"]), ] CONFIGS = [ IssueConfig( "-".join([search_nick, build]), search, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS for search_nick, search in SEARCHES ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_absolute_report_step() #exp.add_comparison_table_step() exp.run_steps()
2,648
32.1125
112
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v5-blind.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v1", "issue213-v4", "issue213-v5"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = ["pegsol-opt11-strips"] # common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')]) exp.add_absolute_report_step() attributes = [ "coverage", "error", "expansions_until_last_jump", "memory", "score_memory", "total_time", "score_total_time", "hash_set_load_factor", "hash_set_resizings"] # Compare revisions. # lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 # lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals())) # Compare builds. # lmcut-base-32 vs. lmcut-base-64 # lmcut-v1-32 vs. lmcut-v1-64 # lmcut-v3-32 vs. lmcut v3-64 for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) for attribute in ["total_time", "memory"]: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]), name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute)) exp.run_steps()
3,084
33.277778
93
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] CONFIG_DICT = { "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy([h], preferred=[h])"], "eager_greedy_cea": [ "--heuristic", "h=cea()", "--search", "eager_greedy([h], preferred=[h])"], "lazy_greedy_add": [ "--heuristic", "h=add()", "--search", "lazy_greedy([h], preferred=[h])"], "lazy_greedy_cg": [ "--heuristic", "h=cg()", "--search", "lazy_greedy([h], preferred=[h])"], "lama-first": [ "--heuristic", """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))""", "--heuristic", "hff=ff_synergy(hlm)", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], } CONFIGS = [ IssueConfig( "-".join([config_nick, build]), config, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_DICT.items() ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, _ in CONFIG_DICT.items()] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) exp.run_steps()
3,093
29.333333
76
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v2-blind-m64.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v1", "issue213-v2"] BUILDS = ["release64"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(attributes=["total_time", "memory"]) exp.run_steps()
1,206
23.14
62
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-opt-5min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["release32", "release64"] SEARCHES = [ ("bjolp", [ "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), ("blind", ["--search", "astar(blind())"]), ("cegar", ["--search", "astar(cegar())"]), ("divpot", ["--search", "astar(diverse_potentials())"]), ("ipdb", ["--search", "astar(ipdb())"]), ("lmcut", ["--search", "astar(lmcut())"]), ("mas", [ "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," " merge_strategy=merge_sccs(order_of_sccs=topological," " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," " label_reduction=exact(before_shrinking=true, before_merging=false)," " max_states=50000, threshold_before_merge=1))"]), ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), ("h2", ["--search", "astar(hm(m=2))"]), ("hmax", ["--search", "astar(hmax())"]), ] CONFIGS = [ IssueConfig( "-".join([search_nick, build]), search, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS for search_nick, search in SEARCHES ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, _ in SEARCHES] outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), outfile=outfile) exp.add_step( 'publish-report', subprocess.call, ['publish', outfile]) exp.run_steps()
3,401
34.072165
112
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/base-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISION = "issue213-base" BUILDS = ["release32", "release64"] SEARCHES = [ ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), ("blind", "astar(blind())"), ("cegar", "astar(cegar())"), ("divpot", "astar(diverse_potentials())"), ("ipdb", "astar(ipdb(max_time=900))"), ("lmcut", "astar(lmcut())"), ("mas", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false), " "merge_strategy=merge_dfp(), " "label_reduction=exact(before_shrinking=true, before_merging=false), max_states=100000, threshold_before_merge=1))"), ("seq", "astar(operatorcounting([state_equation_constraints()]))"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=[REVISION], configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() algorithm_pairs = [] for build1, build2 in itertools.combinations(BUILDS, 2): for config_nick, search in SEARCHES: algorithm_pairs.append( ("{REVISION}-{config_nick}-{build1}".format(**locals()), "{REVISION}-{config_nick}-{build2}".format(**locals()), "Diff ({})".format(config_nick))) exp.add_report( ComparativeReport( algorithm_pairs, attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), name="issue213-opt-comparison") #exp.add_scatter_plot_step(attributes=["total_time", "memory"]) exp.run_steps()
2,257
30.361111
125
py
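base-opt.py hands ComparativeReport a list of (algorithm A, algorithm B, column label) triples, one per search, pairing the 32-bit and 64-bit builds of the same configuration. A minimal sketch of just that pairing step, with SEARCHES reduced to two entries, and the names it produces:

import itertools

REVISION = "issue213-base"
BUILDS = ["release32", "release64"]
SEARCHES = [("blind", "astar(blind())"), ("lmcut", "astar(lmcut())")]

algorithm_pairs = []
for build1, build2 in itertools.combinations(BUILDS, 2):
    for config_nick, search in SEARCHES:
        # Each triple compares the same revision and search across the two builds.
        algorithm_pairs.append(
            ("{REVISION}-{config_nick}-{build1}".format(**locals()),
             "{REVISION}-{config_nick}-{build2}".format(**locals()),
             "Diff ({})".format(config_nick)))

for pair in algorithm_pairs:
    print(pair)
# ('issue213-base-blind-release32', 'issue213-base-blind-release64', 'Diff (blind)')
# ('issue213-base-lmcut-release32', 'issue213-base-lmcut-release64', 'Diff (lmcut)')

With only two builds, itertools.combinations(BUILDS, 2) yields a single (release32, release64) pair, so each search contributes exactly one comparison column group.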
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-lama-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["release32", "release64"] CONFIGS = [ IssueConfig( "lama-" + build, [], build_options=[build], driver_options=["--build", build, "--alias", "lama"]) for rev in REVISIONS for build in BUILDS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.PORTFOLIO_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick in ["lama"]] outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), outfile=outfile) exp.add_step( 'publish-report', subprocess.call, ['publish', outfile]) exp.run_steps()
2,252
28.644737
102
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-lama-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] CONFIGS = [ IssueConfig( "lama-" + build, [], build_options=[build], driver_options=["--build", build, "--alias", "seq-sat-lama-2011"]) for rev in REVISIONS for build in BUILDS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick in ["lama"]] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) exp.run_steps()
2,070
28.169014
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-opt-extra-configs.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"), ("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"), ("h2", "astar(hm(m=2))"), ("hmax", "astar(hmax())"), ] CONFIGS = [ IssueConfig( "-".join([search_nick, build]), ["--search", search], build_options=[build], driver_options=["--build", build, "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS for search_nick, search in SEARCHES ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{search_nick}-{build1}".format(**locals()), "{rev}-{search_nick}-{build2}".format(**locals()), "Diff ({search_nick}-{rev})".format(**locals())) for search_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) exp.run_steps()
2,390
29.265823
75
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-opt-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["release32", "release64"] SEARCHES = [ ("bjolp", [ "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), ("blind", ["--search", "astar(blind())"]), ("cegar", ["--search", "astar(cegar())"]), ("divpot", ["--search", "astar(diverse_potentials())"]), ("ipdb", ["--search", "astar(ipdb())"]), ("lmcut", ["--search", "astar(lmcut())"]), ("mas", [ "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," " merge_strategy=merge_sccs(order_of_sccs=topological," " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," " label_reduction=exact(before_shrinking=true, before_merging=false)," " max_states=50000, threshold_before_merge=1))"]), ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), ("h2", ["--search", "astar(hm(m=2))"]), ("hmax", ["--search", "astar(hmax())"]), ] CONFIGS = [ IssueConfig( "-".join([search_nick, build]), search, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "30m"]) for rev in REVISIONS for build in BUILDS for search_nick, search in SEARCHES ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, _ in SEARCHES] outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), outfile=outfile) exp.add_step( 'publish-report', subprocess.call, ['publish', outfile]) exp.run_steps()
3,402
34.082474
112
py
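In the v8 scripts the comparison report is written to an explicit outfile inside the evaluation directory and then published via subprocess.call(['publish', outfile]). Below is a small sketch of just the filename construction; EXPNAME and eval_dir are assumed example values (the real ones come from common_setup.get_experiment_name() and the experiment object, respectively).

import itertools
import os

EXPNAME = "issue213-v8-opt-30min"             # assumption: what get_experiment_name() would return
eval_dir = "data/issue213-v8-opt-30min-eval"  # assumption: the experiment's eval_dir
BUILDS = ["release32", "release64"]

for build1, build2 in itertools.combinations(BUILDS, 2):
    # Same .format(**locals()) idiom as in the script above.
    outfile = os.path.join(
        eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals()))
    print(outfile)
# data/issue213-v8-opt-30min-eval/issue213-v8-opt-30min-release32-vs-release64.html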
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v2-blind-m32.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v1", "issue213-v2"] BUILDS = ["release32"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp.add_scatter_plot_step(attributes=["total_time", "memory"]) exp.run_steps()
1,206
23.14
62
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-opt-5min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] SEARCHES = [ ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), ("blind", "astar(blind())"), ("cegar", "astar(cegar())"), ("divpot", "astar(diverse_potentials())"), ("ipdb", "astar(ipdb())"), ("lmcut", "astar(lmcut())"), ("mas", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," " merge_strategy=merge_sccs(order_of_sccs=topological," " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," " label_reduction=exact(before_shrinking=true, before_merging=false)," " max_states=50000, threshold_before_merge=1))"), ("occ", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"), ("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"), ("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"), ("h2", "astar(hm(m=2))"), ("hmax", "astar(hmax())"), ] CONFIGS = [ IssueConfig( "-".join([search_nick, build]), ["--search", search], build_options=[build], driver_options=["--build", build, "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS for search_nick, search in SEARCHES ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): algorithm_pairs = [ ("{rev}-{search_nick}-{build1}".format(**locals()), "{rev}-{search_nick}-{build2}".format(**locals()), "Diff ({search_nick}-{rev})".format(**locals())) for search_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) exp.run_steps()
3,167
33.434783
103
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-lama-5min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["release32", "release64"] CONFIGS = [ IssueConfig( "lama-" + build, [], build_options=[build], driver_options=["--build", build, "--alias", "lama", "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.PORTFOLIO_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick in ["lama"]] outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), outfile=outfile) exp.add_step( 'publish-report', subprocess.call, ['publish', outfile]) exp.run_steps()
2,282
29.039474
102
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-lama-first-pref-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] CONFIG_NICKS = [ ("lama-first-syn", [ "--heuristic", """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))""", "--heuristic", "hff=ff_synergy(hlm)", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ("lama-first-no-syn-pref-false", [ "--heuristic", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one), pref=false)", "--heuristic", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ("lama-first-no-syn-pref-true", [ "--heuristic", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one), pref=true)", "--heuristic", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ] CONFIGS = [ IssueConfig( config_nick + "-" + build, config, build_options=[build], driver_options=["--build", build]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, _ in CONFIG_NICKS] print algorithm_pairs exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) exp.run_steps()
3,344
34.210526
127
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "quality", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,782
36.425316
82
py
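common_setup.py derives the experiment name and data directory from the location of the main script. The real helpers take no arguments and read the running script's path through lab's tools.get_script_path(); passing the path explicitly below is a simplification so the sketch runs on its own.

import os

def get_experiment_name(script_path):
    """Mirror of common_setup.get_experiment_name(): '<parent dir>-<script stem>'."""
    script = os.path.abspath(script_path)
    script_dir = os.path.basename(os.path.dirname(script))
    script_base = os.path.splitext(os.path.basename(script))[0]
    return "%s-%s" % (script_dir, script_base)

def get_data_dir(script_path):
    """Mirror of common_setup.get_data_dir(): the 'data' subdirectory next to the script."""
    return os.path.join(
        os.path.dirname(script_path), "data", get_experiment_name(script_path))

print(get_experiment_name("experiments/issue213/v8-opt-5min.py"))
# issue213-v8-opt-5min
print(get_data_dir("experiments/issue213/v8-opt-5min.py"))
# experiments/issue213/data/issue213-v8-opt-5min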
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-lama-5min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] CONFIGS = [ IssueConfig( "lama-" + build, [], build_options=[build], driver_options=["--build", build, "--alias", "seq-sat-lama-2011", "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.ANYTIME_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick in ["lama"]] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) exp.run_steps()
2,100
28.591549
104
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-sat-5min-debug.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["debug64"] CONFIG_DICT = { "eager_greedy_ff": [ "--evaluator", "h=ff()", "--search", "eager_greedy([h], preferred=[h])"], "eager_greedy_cea": [ "--evaluator", "h=cea()", "--search", "eager_greedy([h], preferred=[h])"], "lazy_greedy_add": [ "--evaluator", "h=add()", "--search", "lazy_greedy([h], preferred=[h])"], "lazy_greedy_cg": [ "--evaluator", "h=cg()", "--search", "lazy_greedy([h], preferred=[h])"], "lama-first": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], "lama-first-typed": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", "lazy(alt([single(hff), single(hff, pref_only=true)," "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," "preferred_successors_first=false)"], } CONFIGS = [ IssueConfig( "-".join([config_nick, build]), config, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_DICT.items() ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") exp.add_absolute_report_step() #exp.add_comparison_table_step() exp.run_steps()
3,057
30.204082
103
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v6-blind.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v1", "issue213-v5", "issue213-v6"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')]) exp.add_absolute_report_step() attributes = [ "coverage", "error", "expansions_until_last_jump", "memory", "score_memory", "total_time", "score_total_time", "hash_set_load_factor", "hash_set_resizings"] # Compare revisions. # lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 # lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 for build in BUILDS: for rev1, rev2 in itertools.combinations(REVISIONS, 2): algorithm_pairs = [ ("{rev1}-{config_nick}-{build}".format(**locals()), "{rev2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals())) # Compare builds. # lmcut-base-32 vs. lmcut-base-64 # lmcut-v1-32 vs. lmcut-v1-64 # lmcut-v3-32 vs. lmcut v3-64 for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, search in SEARCHES] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) for attribute in ["total_time", "memory"]: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]), name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute)) exp.run_steps()
3,057
32.977778
93
py
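v6-blind.py generates two families of comparison reports: one per revision pair within each build, and one per build pair within each revision. The sketch below only reproduces the two naming loops; note that the report names repeat the "issue213" prefix because the revision strings already contain it, e.g. "issue213-issue213-v1-vs-issue213-v5-release32".

import itertools

REVISIONS = ["issue213-v1", "issue213-v5", "issue213-v6"]
BUILDS = ["release32", "release64"]

# Revision comparisons: 3 revision pairs x 2 builds = 6 reports.
for build in BUILDS:
    for rev1, rev2 in itertools.combinations(REVISIONS, 2):
        print("issue213-{rev1}-vs-{rev2}-{build}".format(**locals()))

# Build comparisons: 1 build pair x 3 revisions = 3 reports.
for build1, build2 in itertools.combinations(BUILDS, 2):
    for rev in REVISIONS:
        print("issue213-{build1}-vs-{build2}-{rev}".format(**locals()))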
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v8-sat-5min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os import subprocess from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport EXPNAME = common_setup.get_experiment_name() DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v8"] BUILDS = ["release32", "release64"] CONFIG_DICT = { "eager_greedy_ff": [ "--evaluator", "h=ff()", "--search", "eager_greedy([h], preferred=[h])"], "eager_greedy_cea": [ "--evaluator", "h=cea()", "--search", "eager_greedy([h], preferred=[h])"], "lazy_greedy_add": [ "--evaluator", "h=add()", "--search", "lazy_greedy([h], preferred=[h])"], "lazy_greedy_cg": [ "--evaluator", "h=cg()", "--search", "lazy_greedy([h], preferred=[h])"], "lama-first": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], "lama-first-typed": [ "--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", "lazy(alt([single(hff), single(hff, pref_only=true)," "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," "preferred_successors_first=false)"], } CONFIGS = [ IssueConfig( "-".join([config_nick, build]), config, build_options=[build], driver_options=["--build", build, "--overall-time-limit", "5m"]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_DICT.items() ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step("build", exp.build) exp.add_step("start", exp.start_runs) exp.add_fetcher(name="fetch") #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): for rev in REVISIONS: algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, _ in CONFIG_DICT.items()] outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), outfile=outfile) exp.add_step( 'publish-report', subprocess.call, ['publish', outfile]) exp.run_steps()
3,821
32.234783
103
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v7-lama-first-30min.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v7"] BUILDS = ["release32", "release64"] CONFIG_NICKS = [ ("lama-first-syn", [ "--heuristic", """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))""", "--heuristic", "hff=ff_synergy(hlm)", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ("lama-first-no-syn", [ "--heuristic", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one))", "--heuristic", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), ] CONFIGS = [ IssueConfig( config_nick + "-" + build, config, build_options=[build], driver_options=["--build", build]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES # Compare builds. for build1, build2 in itertools.combinations(BUILDS, 2): algorithm_pairs = [ ("{rev}-{config_nick}-{build1}".format(**locals()), "{rev}-{config_nick}-{build2}".format(**locals()), "Diff ({config_nick}-{rev})".format(**locals())) for config_nick, _ in CONFIG_NICKS] print algorithm_pairs exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) exp.run_steps()
2,914
31.752809
115
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue213/v3-blind.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue213-v2", "issue213-v3"] BUILDS = ["release32", "release64"] SEARCHES = [ ("blind", "astar(blind())"), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), ["--search", search], build_options=[build], driver_options=["--build", build]) for nick, search in SEARCHES for build in BUILDS ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() algorithm_pairs = [] revision1, revision2 = REVISIONS for build in BUILDS: for config_nick, search in SEARCHES: algorithm_pairs.append( ("{revision1}-{config_nick}-{build}".format(**locals()), "{revision2}-{config_nick}-{build}".format(**locals()), "Diff ({config_nick}-{build})".format(**locals()))) exp.add_report( ComparativeReport( algorithm_pairs, attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), name="issue213-v2-vs-v3-blind") exp.run_steps()
1,654
25.693548
72
py