Dataset columns (with the observed minimum and maximum values):

  repo             string   (length 2 to 99)
  file             string   (length 13 to 225)
  code             string   (length 0 to 18.3M)
  file_length      int64    (0 to 18.3M)
  avg_line_length  float64  (0 to 1.36M)
  max_line_length  int64    (0 to 4.26M)
  extension_type   string   (1 distinct value)

Each record below follows this schema: repository name, file path, file contents, the three length statistics, and the file extension.
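The three numeric columns are evidently derived from the code field. A minimal sketch of how such per-file statistics could be computed (the helper name file_stats is illustrative and not part of the dataset; treating file_length as a character count is an assumption based on the observed values):

def file_stats(code):
    """Return (file_length, avg_line_length, max_line_length) for one source file."""
    lines = code.splitlines() or [""]
    file_length = len(code)  # assumed: total character count of the file
    avg_line_length = sum(len(line) for line in lines) / float(len(lines))
    max_line_length = max(len(line) for line in lines)
    return file_length, avg_line_length, max_line_length

# Example on a tiny two-line snippet.
print(file_stats("import os\nprint(os.getcwd())"))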
DAAISy
DAAISy-main/dependencies/FD/experiments/issue710/ipdb-parser.py
#! /usr/bin/env python

from lab.parser import Parser

parser = Parser()
parser.add_pattern('hc_iterations', 'iPDB: iterations = (\d+)', required=False, type=int)
parser.add_pattern('hc_num_patters', 'iPDB: number of patterns = (\d+)', required=False, type=int)
parser.add_pattern('hc_size', 'iPDB: size = (\d+)', required=False, type=int)
parser.add_pattern('hc_num_generated', 'iPDB: generated = (\d+)', required=False, type=int)
parser.add_pattern('hc_num_rejected', 'iPDB: rejected = (\d+)', required=False, type=int)
parser.add_pattern('hc_max_pdb_size', 'iPDB: maximum pdb size = (\d+)', required=False, type=int)
parser.add_pattern('hc_hill_climbing_time', 'iPDB: hill climbing time: (.+)s', required=False, type=float)
parser.add_pattern('hc_total_time', 'Pattern generation \(hill climbing\) time: (.+)s', required=False, type=float)
parser.add_pattern('cpdbs_time', 'PDB collection construction time: (.+)s', required=False, type=float)

def check_hc_constructed(content, props):
    hc_time = props.get('hc_total_time')
    abstraction_constructed = False
    if hc_time is not None:
        abstraction_constructed = True
    props['hc_abstraction_constructed'] = abstraction_constructed

parser.add_function(check_hc_constructed)

def check_planner_exit_reason(content, props):
    hc_abstraction_constructed = props.get('hc_abstraction_constructed')
    error = props.get('error')
    if error != 'none' and error != 'timeout' and error != 'out-of-memory':
        print 'error: %s' % error
        return

    # Check whether hill climbing computation or search ran out of
    # time or memory.
    hc_out_of_time = False
    hc_out_of_memory = False
    search_out_of_time = False
    search_out_of_memory = False

    if hc_abstraction_constructed == False:
        if error == 'timeout':
            hc_out_of_time = True
        elif error == 'out-of-memory':
            hc_out_of_memory = True
    elif hc_abstraction_constructed == True:
        if error == 'timeout':
            search_out_of_time = True
        elif error == 'out-of-memory':
            search_out_of_memory = True

    props['hc_out_of_time'] = hc_out_of_time
    props['hc_out_of_memory'] = hc_out_of_memory
    props['search_out_of_time'] = search_out_of_time
    props['search_out_of_memory'] = search_out_of_memory

parser.add_function(check_planner_exit_reason)
parser.parse()
2,375
41.428571
115
py
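Each add_pattern() call in the parser above registers a named regular expression that lab matches against the run's log and converts the first captured group with the given type. Roughly, the extraction works like the following sketch; the sample log lines are invented for illustration and need not match real iPDB output exactly:

import re

log = "iPDB: iterations = 12\niPDB: hill climbing time: 3.5s\n"  # hypothetical log excerpt

match = re.search(r'iPDB: iterations = (\d+)', log)
print(int(match.group(1)))    # -> 12

match = re.search(r'iPDB: hill climbing time: (.+)s', log)
print(float(match.group(1)))  # -> 3.5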
DAAISy
DAAISy-main/dependencies/FD/experiments/issue659/v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

from common_setup import IssueConfig, IssueExperiment, is_test_run

BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue659-v1-base", "issue659-v1"]
CONFIGS = [
    IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)])
    for heuristic in ["cegar(max_states=10000,max_time=infinity)"]
]
SUITE = [
    'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
    'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips',
    'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips',
    'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
    'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery',
    'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips',
    'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips',
    'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips',
    'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips',
    'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
    'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
    'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
    'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips',
    'tpp', 'transport-opt08-strips', 'transport-opt11-strips',
    'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
    'visitall-opt14-strips', 'woodworking-opt08-strips',
    'woodworking-opt11-strips', 'zenotravel']
ENVIRONMENT = MaiaEnvironment(priority=0, email="[email protected]")

if is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(attributes=["total_time"])

exp()
2,139
35.896552
74
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue659/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." 
+ report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step( 'publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step( "publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step(step_name, make_scatter_plots))
11,446
33.068452
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue659/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute in two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
35.654206
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/v2-translate.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v2"]
CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig('translate-time-limit', [],
                driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig('translate-memory-limit', [],
                driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

del exp.commands['parse-search']

exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
1,236
32.432432
114
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/v2-search.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-base", "issue739-v2"]
CONFIGS = [
    IssueConfig('search-time-limit', ['--search', 'astar(blind())'],
                driver_options=['--search-time-limit', '20s']),
    IssueConfig('search-memory-limit', ['--search', 'astar(blind())'],
                driver_options=['--search-memory-limit', '100M']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_resource('exit_code_converter_parser', 'exit-code-converter-parser.py',
                 dest='exit-code-converter-parser.py')
exp.add_command('exit-code-converter-parser', ['{exit_code_converter_parser}'])

exp.add_comparison_table_step()

exp.run_steps()
1,332
35.027027
121
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-base", "issue739-v1"]
CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig('translate-time-limit', [],
                driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig('translate-memory-limit', [],
                driver_options=['--translate-memory-limit', '100M', '--translate']),
    IssueConfig('search-time-limit', ['--search', 'astar(lmcut())'],
                driver_options=['--search-time-limit', '20s']),
    IssueConfig('search-memory-limit', ['--search', 'astar(lmcut())'],
                driver_options=['--search-memory-limit', '100M']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_resource('exit_code_converter_parser', 'exit-code-converter-parser.py',
                 dest='exit-code-converter-parser.py')
exp.add_command('exit-code-converter-parser', ['{exit_code_converter_parser}'])

exp.add_comparison_table_step()

exp.run_steps()
1,622
39.575
121
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/v4-translate.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v4"]
CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig('translate-with-options',
                ['--keep-unreachable-facts', '--keep-unimportant-variables', '--full-encoding'],
                driver_options=['--translate']),
    IssueConfig('translate-time-limit', [],
                driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig('translate-memory-limit', [],
                driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

del exp.commands['parse-search']

exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
1,392
35.657895
155
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/exit-code-converter-parser.py
#! /usr/bin/env python

from lab.parser import Parser

parser = Parser()

LEGACY_TO_NEW_EXIT_CODES = {
    'critical-error': 'search-critical-error',
    'input-error': 'search-input-error',
    'unsupported-feature-requested': 'search-unsupported',
    'unsolvable': 'search-unsolvable',
    'incomplete-search-found-no-plan': 'search-unsolvable-incomplete',
    'out-of-memory': 'search-out-of-memory',
    'timeout': 'search-out-of-time',
    'timeout-and-out-of-memory': 'search-out-of-memory-and-time',
}

def convert_legacy_to_new_exit_codes(content, props):
    error = props['error']
    if error in LEGACY_TO_NEW_EXIT_CODES:
        props['error'] = LEGACY_TO_NEW_EXIT_CODES[error]

parser.add_function(convert_legacy_to_new_exit_codes)
parser.parse()
762
28.346154
70
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/v5-translate.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v5"]
CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig('translate-with-options',
                ['--translate-options', '--keep-unreachable-facts',
                 '--keep-unimportant-variables', '--full-encoding'],
                driver_options=['--translate']),
    IssueConfig('translate-time-limit', [],
                driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig('translate-memory-limit', [],
                driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
exp.add_parser(exp.LAB_DRIVER_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
1,647
34.06383
178
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/v3-translate.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v3"]
CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig('translate-time-limit', [],
                driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig('translate-memory-limit', [],
                driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

del exp.commands['parse-search']

exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
1,236
32.432432
114
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport try: from relativescatter import RelativeScatterPlotReport matplotlib = True except ImportError: print 'matplotlib not availabe, scatter plots not available' matplotlib = False def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main 
script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if matplotlib: if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,435
36.11054
82
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/v4-translate-with-options.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v4"]
CONFIGS = [
    IssueConfig('translate-with-options',
                ['--translate-options', '--keep-unreachable-facts',
                 '--keep-unimportant-variables', '--full-encoding'],
                driver_options=['--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]",
                                    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

del exp.commands['parse-search']

exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
1,125
31.171429
178
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue739/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue632/v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
import suites

configs = [
    IssueConfig(
        "cegar-10K-original",
        ["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]),
]
revisions = ["issue632-base", "issue632-v1"]

exp = IssueExperiment(
    revisions=revisions,
    configs=configs,
    suite=suites.suite_optimal_with_ipc11(),
    test_suite=["depot:pfile1"],
    email="[email protected]",
)
exp.add_comparison_table_step()

for attribute in ["memory", "total_time"]:
    for config in configs:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
        )

exp()
1,032
26.184211
96
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue632/v1-landmarks-goals.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
import suites

configs = [
    IssueConfig(
        "cegar-900s",
        ["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"]),
]
revisions = ["issue632-base", "issue632-v1"]

exp = IssueExperiment(
    revisions=revisions,
    configs=configs,
    suite=suites.suite_optimal_with_ipc11(),
    test_suite=["depot:pfile1"],
    email="[email protected]",
)
exp.add_comparison_table_step()

for attribute in ["memory", "total_time"]:
    for config in configs:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
        )

exp()
1,011
25.631579
86
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue632/suites.py
# Benchmark suites from the Fast Downward benchmark collection. def suite_alternative_formulations(): return ['airport-adl', 'no-mprime', 'no-mystery'] def suite_ipc98_to_ipc04_adl(): return [ 'assembly', 'miconic-fulladl', 'miconic-simpleadl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'schedule', ] def suite_ipc98_to_ipc04_strips(): return [ 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', 'satellite', 'zenotravel', ] def suite_ipc98_to_ipc04(): # All IPC1-4 domains, including the trivial Movie. return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) def suite_ipc06_adl(): return [ 'openstacks', 'pathways', 'trucks', ] def suite_ipc06_strips_compilations(): return [ 'openstacks-strips', 'pathways-noneg', 'trucks-strips', ] def suite_ipc06_strips(): return [ 'pipesworld-tankage', 'rovers', 'storage', 'tpp', ] def suite_ipc06(): return sorted(suite_ipc06_adl() + suite_ipc06_strips()) def suite_ipc08_common_strips(): return [ 'parcprinter-08-strips', 'pegsol-08-strips', 'scanalyzer-08-strips', ] def suite_ipc08_opt_adl(): return ['openstacks-opt08-adl'] def suite_ipc08_opt_strips(): return sorted(suite_ipc08_common_strips() + [ 'elevators-opt08-strips', 'openstacks-opt08-strips', 'sokoban-opt08-strips', 'transport-opt08-strips', 'woodworking-opt08-strips', ]) def suite_ipc08_opt(): return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) def suite_ipc08_sat_adl(): return ['openstacks-sat08-adl'] def suite_ipc08_sat_strips(): return sorted(suite_ipc08_common_strips() + [ # Note: cyber-security is missing. 'elevators-sat08-strips', 'openstacks-sat08-strips', 'sokoban-sat08-strips', 'transport-sat08-strips', 'woodworking-sat08-strips', ]) def suite_ipc08_sat(): return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) def suite_ipc08(): return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) def suite_ipc11_opt(): return [ 'barman-opt11-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'nomystery-opt11-strips', 'openstacks-opt11-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'pegsol-opt11-strips', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', 'tidybot-opt11-strips', 'transport-opt11-strips', 'visitall-opt11-strips', 'woodworking-opt11-strips', ] def suite_ipc11_sat(): return [ 'barman-sat11-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'nomystery-sat11-strips', 'openstacks-sat11-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'pegsol-sat11-strips', 'scanalyzer-sat11-strips', 'sokoban-sat11-strips', 'tidybot-sat11-strips', 'transport-sat11-strips', 'visitall-sat11-strips', 'woodworking-sat11-strips', ] def suite_ipc11(): return sorted(suite_ipc11_opt() + suite_ipc11_sat()) def suite_ipc14_agl_adl(): return [ 'cavediving-agl14-adl', 'citycar-agl14-adl', 'maintenance-agl14-adl', ] def suite_ipc14_agl_strips(): return [ 'barman-agl14-strips', 'childsnack-agl14-strips', 'floortile-agl14-strips', 'ged-agl14-strips', 'hiking-agl14-strips', 'openstacks-agl14-strips', 'parking-agl14-strips', 'tetris-agl14-strips', 'thoughtful-agl14-strips', 'transport-agl14-strips', 'visitall-agl14-strips', ] def suite_ipc14_agl(): return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) def suite_ipc14_mco_adl(): return [ 'cavediving-mco14-adl', 'citycar-mco14-adl', 'maintenance-mco14-adl', ] def suite_ipc14_mco_strips(): return [ 'barman-mco14-strips', 
'childsnack-mco14-strips', 'floortile-mco14-strips', 'ged-mco14-strips', 'hiking-mco14-strips', 'openstacks-mco14-strips', 'parking-mco14-strips', 'tetris-mco14-strips', 'thoughtful-mco14-strips', 'transport-mco14-strips', 'visitall-mco14-strips', ] def suite_ipc14_mco(): return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) def suite_ipc14_opt_adl(): return [ 'cavediving-opt14-adl', 'citycar-opt14-adl', 'maintenance-opt14-adl', ] def suite_ipc14_opt_strips(): return [ 'barman-opt14-strips', 'childsnack-opt14-strips', 'floortile-opt14-strips', 'ged-opt14-strips', 'hiking-opt14-strips', 'openstacks-opt14-strips', 'parking-opt14-strips', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips', 'visitall-opt14-strips', ] def suite_ipc14_opt(): return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) def suite_ipc14_sat_adl(): return [ 'cavediving-sat14-adl', 'citycar-sat14-adl', 'maintenance-sat14-adl', ] def suite_ipc14_sat_strips(): return [ 'barman-sat14-strips', 'childsnack-sat14-strips', 'floortile-sat14-strips', 'ged-sat14-strips', 'hiking-sat14-strips', 'openstacks-sat14-strips', 'parking-sat14-strips', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'transport-sat14-strips', 'visitall-sat14-strips', ] def suite_ipc14_sat(): return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) def suite_ipc14(): return sorted( suite_ipc14_agl() + suite_ipc14_mco() + suite_ipc14_opt() + suite_ipc14_sat()) def suite_unsolvable(): # TODO: Add other unsolvable problems (Miconic-FullADL). # TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl' # if the extra-domains branch is merged. return sorted( ['mystery:prob%02d.pddl' % index for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) def suite_optimal_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_opt_adl()) def suite_optimal_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + suite_ipc11_opt()) def suite_optimal(): return sorted(suite_optimal_adl() + suite_optimal_strips()) def suite_satisficing_adl(): return sorted( suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + suite_ipc08_sat_adl()) def suite_satisficing_strips(): return sorted( suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + suite_ipc11_sat()) def suite_satisficing(): return sorted(suite_satisficing_adl() + suite_satisficing_strips()) def suite_all(): return sorted( suite_ipc98_to_ipc04() + suite_ipc06() + suite_ipc06_strips_compilations() + suite_ipc08() + suite_ipc11() + suite_alternative_formulations())
7,695
23.35443
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue632/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=None, **kwargs): """ If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(os.path.join(repo, "benchmarks"), suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,481
33.963585
83
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue632/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows how a specific attribute differs
    between two configurations. The attribute value in config 1 is shown
    on the x-axis and the ratio of the value in config 2 to the value in
    config 1 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['config'] == self.configs[0] and
                    run2['config'] == self.configs[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.configs[0], val1)
            assert val2 > 0, (domain, problem, self.configs[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlots use log-scaling on the x-axis by default.
        default_xscale = 'log'
        if self.attribute and self.attribute in self.LINEAR:
            default_xscale = 'linear'
        PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
35.654206
84
py
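A minimal sketch of the transformation that RelativeScatterPlotReport._fill_categories above applies to each task: the attribute value under the first config gives the x-coordinate, and the ratio of the second config's value to the first gives the y-coordinate, so the log-scaled y-axis is centered around 1. The run dictionaries and numbers below are invented for illustration; only the x/y computation mirrors the code above.

# Hypothetical runs of one task under the two compared configs (values invented).
run1 = {"config": "issue632-base-astar-blind", "expansions": 1200}
run2 = {"config": "issue632-v1-astar-blind", "expansions": 300}

attribute = "expansions"
val1 = run1[attribute]
val2 = run2[attribute]

# Same computation as in _fill_categories: absolute value on x, ratio on y.
x = val1
y = val2 / float(val1)
print x, y  # 1200 0.25 -> a point below 1: the second config needs fewer expansions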
DAAISy
DAAISy-main/dependencies/FD/experiments/issue469/raw_memory_parser.py
#! /usr/bin/env python from lab.parser import Parser class RawMemoryParser(Parser): def __init__(self): Parser.__init__(self) self.add_pattern('raw_memory', r'Peak memory: (.+) KB', type=int, required=False) if __name__ == '__main__': parser = RawMemoryParser() print 'Running RawMemoryParser parser' parser.parse()
353
21.125
89
py
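A small sketch of what the 'raw_memory' pattern registered in raw_memory_parser.py above extracts from a planner log; the log line here is made up for illustration, only the regular expression is taken from the parser.

import re

log_line = "Peak memory: 102436 KB"  # hypothetical line from a search log
match = re.search(r"Peak memory: (.+) KB", log_line)
if match:
    print int(match.group(1))  # 102436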
DAAISy
DAAISy-main/dependencies/FD/experiments/issue469/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return (node.endswith("cluster.bc2.ch") or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Once we have reference results, we should add "quality". # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. 
:: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. 
kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,741
35.614943
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue469/issue469.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute from lab.suites import suite_all import common_setup import os exp = common_setup.IssueExperiment( search_revisions=["issue469-base", "issue469-v1"], configs={"astar_blind": ["--search", "astar(blind())"]}, suite=suite_all(), ) parser = os.path.join(common_setup.get_script_dir(), 'raw_memory_parser.py') exp.add_search_parser(parser) def add_unexplained_errors_as_int(run): if run.get('error').startswith('unexplained'): run['unexplained_errors'] = 1 else: run['unexplained_errors'] = 0 return run exp.add_absolute_report_step( attributes=['raw_memory', Attribute('unexplained_errors', absolute=True)], filter=add_unexplained_errors_as_int ) exp()
841
22.388889
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue838/v2-cache-size.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue838-v2"] CONFIG_NICKS = [ ("lazy-greedy-cg-cache-size-{cache_size}".format(**locals()), [ "--heuristic", "h=cg(max_cache_size={cache_size})".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"]) for cache_size in ["0", "1K", "1M", "2M", "5M", "10M", "20M", "50M", "100M", "1000M"] ] CONFIGS = [ IssueConfig(config_nick, config) for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_absolute_report_step() #exp.add_comparison_table_step() #attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES #algorithm_pairs = [ # ("issue838-v1-{build}-lazy-greedy-cg-use-cache-False".format(**locals()), # "issue838-v1-{build}-lazy-greedy-cg-use-cache-True".format(**locals()), # "Diff ({build})".format(**locals())) # for build in BUILDS] #exp.add_report( # ComparativeReport(algorithm_pairs, attributes=attributes), # name="{SCRIPT_NAME}".format(**locals())) exp.run_steps()
2,110
29.594203
89
py
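A short sketch of how the CONFIG_NICKS comprehension in v2-cache-size.py above unfolds into one algorithm configuration per cache size; the nick and heuristic strings follow directly from the script, the printing loop is illustrative.

cache_sizes = ["0", "1K", "1M", "2M", "5M", "10M", "20M", "50M", "100M", "1000M"]
config_nicks = [
    ("lazy-greedy-cg-cache-size-{cache_size}".format(cache_size=cache_size),
     ["--heuristic", "h=cg(max_cache_size={cache_size})".format(cache_size=cache_size),
      "--search", "lazy_greedy([h],preferred=[h])"])
    for cache_size in cache_sizes]
for nick, options in config_nicks:
    print nick, "->", options[1]
# lazy-greedy-cg-cache-size-0 -> h=cg(max_cache_size=0)
# lazy-greedy-cg-cache-size-1K -> h=cg(max_cache_size=1K)
# ...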
DAAISy
DAAISy-main/dependencies/FD/experiments/issue838/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'data-network-opt18-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'organic-synthesis-opt18-strips', 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'snake-opt18-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 'termes-opt18-strips', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'agricola-sat18-strips', 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'data-network-sat18-strips', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'flashfill-sat18-adl', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'organic-synthesis-sat18-strips', 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', 'termes-sat18-strips', 
'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "planner_memory", "planner_time", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) 
If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,786
36.435443
82
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue838/relativescatter.py
# -*- coding: utf-8 -*-

from collections import defaultdict

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis,
                report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows a relative comparison of two
    algorithms with regard to the given attribute. The attribute value
    of algorithm 1 is shown on the x-axis and the ratio of the value of
    algorithm 2 to the value of algorithm 1 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['algorithm'] == self.algorithms[0] and
                    run2['algorithm'] == self.algorithms[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.algorithms[0], val1)
            assert val2 > 0, (domain, problem, self.algorithms[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlot uses log-scaling on the x-axis by default.
        PlotReport._set_scales(
            self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue838/v1-use-cache.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import itertools import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue838-v1"] BUILDS = ["release32", "release64"] CONFIG_NICKS = [ ("lazy-greedy-cg-use-cache-{use_cache}".format(**locals()), [ "--heuristic", "h=cg(use_cache={use_cache})".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"]) for use_cache in [True, False] ] CONFIGS = [ IssueConfig( build + "-" + config_nick, config, build_options=[build], driver_options=["--build", build]) for rev in REVISIONS for build in BUILDS for config_nick, config in CONFIG_NICKS ] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_2", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() #exp.add_comparison_table_step() attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES algorithm_pairs = [ ("issue838-v1-{build}-lazy-greedy-cg-use-cache-False".format(**locals()), "issue838-v1-{build}-lazy-greedy-cg-use-cache-True".format(**locals()), "Diff ({build})".format(**locals())) for build in BUILDS] exp.add_report( ComparativeReport(algorithm_pairs, attributes=attributes), name="{SCRIPT_NAME}".format(**locals())) exp.run_steps()
2,229
28.342105
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue633/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment configs = [ IssueConfig( "cegar-10K-original", ["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]), IssueConfig( "cegar-10K-landmarks-goals", ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=10000,max_time=infinity))"]), IssueConfig( "cegar-900s-landmarks-goals", ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=infinity,max_time=900))"]), ] exp = IssueExperiment( revisions=["issue633-v1", "issue633-v2"], configs=configs, suite=suites.suite_optimal_with_ipc11(), test_suite=["depot:pfile1"], email="[email protected]", ) exp.add_comparison_table_step() exp()
860
25.90625
106
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue633/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment configs = [ IssueConfig( "cegar-10K-original", ["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]), IssueConfig( "cegar-10K-landmarks-goals", ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=10000,max_time=infinity))"]), IssueConfig( "cegar-900s-landmarks-goals", ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=infinity,max_time=900))"]), ] exp = IssueExperiment( revisions=["issue633-base", "issue633-v1"], configs=configs, suite=suites.suite_optimal_with_ipc11(), test_suite=["depot:pfile1"], email="[email protected]", ) exp.add_comparison_table_step() exp()
862
25.96875
106
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue633/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Wrapper for FastDownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=1, **kwargs): """Create a DownwardExperiment with some convenience features. If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(os.path.join(repo, "benchmarks"), suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,539
34.027933
83
py
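A brief sketch of how the IssueExperiment constructor above pairs every revision with every config nick via get_algo_nick to form the algorithm names used in reports; get_algo_nick is copied from the file above and the revision and nick lists come from the issue633 scripts, while the loop itself is illustrative.

def get_algo_nick(revision, config_nick):
    return "{revision}-{config_nick}".format(**locals())

revisions = ["issue633-base", "issue633-v1"]
config_nicks = ["cegar-10K-original", "cegar-10K-landmarks-goals",
                "cegar-900s-landmarks-goals"]

for rev in revisions:
    for nick in config_nicks:
        print get_algo_nick(rev, nick)
# issue633-base-cegar-10K-original
# issue633-base-cegar-10K-landmarks-goals
# ...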
DAAISy
DAAISy-main/dependencies/FD/experiments/issue791/v2-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue791-base", "issue791-v2"] CONFIGS = [ IssueConfig( 'blind-debug', ['--search', 'astar(blind())'], build_options=["debug32"], driver_options=["--build", "debug32", "--overall-time-limit", "5m"] ), IssueConfig( 'blind-release', ['--search', 'astar(blind())'], build_options=["release32"], driver_options=["--build", "release32", "--overall-time-limit", "5m"] ), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step() exp.run_steps()
1,509
26.454545
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue791/v1-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue791-base", "issue791-v1"] CONFIGS = [ IssueConfig( 'blind-debug', ['--search', 'astar(blind())'], build_options=["debug32"], driver_options=["--build", "debug32", "--overall-time-limit", "5m"] ), IssueConfig( 'blind-release', ['--search', 'astar(blind())'], build_options=["release32"], driver_options=["--build", "release32", "--overall-time-limit", "5m"] ), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step() exp.run_steps()
1,509
26.454545
77
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue791/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,153
35.955614
82
py
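A quick illustration of the naming scheme used by the common_setup.py module above: get_experiment_name() combines the script's parent directory and basename, and get_data_dir() places results in a "data" subdirectory named after it. The snippet below is a standalone sketch of that behaviour (it reuses the example path from the docstring and does not import the lab package).

import os

def experiment_name(script_path):
    # Mirrors get_experiment_name() above: "/ham/spam/eggs.py" -> "spam-eggs".
    script = os.path.abspath(script_path)
    script_dir = os.path.basename(os.path.dirname(script))
    script_base = os.path.splitext(os.path.basename(script))[0]
    return "%s-%s" % (script_dir, script_base)

def data_dir(script_path):
    # Mirrors get_data_dir(): a "data/<experiment-name>" directory next to the script.
    return os.path.join(os.path.dirname(script_path), "data",
                        experiment_name(script_path))

print(experiment_name("/ham/spam/eggs.py"))   # spam-eggs
print(data_dir("/ham/spam/eggs.py"))          # /ham/spam/data/spam-eggs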
DAAISy
DAAISy-main/dependencies/FD/experiments/issue791/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
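The core of RelativeScatterPlotReport above is the coordinate transformation in _fill_categories(): x is the attribute value of the first algorithm, y is the ratio of the second to the first, and the y-limits are then widened so the plot is centred around 1. Below is a minimal sketch of that computation, detached from the lab report classes; the sample values are invented.

def relative_coords(pairs):
    # pairs: list of (val1, val2) attribute values for algorithm 1 and 2.
    # Returns scatter coordinates plus y-limits centred around 1, mirroring
    # the logic of _fill_categories() above.
    coords = []
    ylim_bottom, ylim_top = 2.0, 0.5
    for val1, val2 in pairs:
        if val1 is None or val2 is None or val1 <= 0 or val2 <= 0:
            continue
        x, y = val1, val2 / float(val1)
        coords.append((x, y))
        ylim_top = max(ylim_top, y)
        ylim_bottom = min(ylim_bottom, y)
    # Centre the y-range around 1 (a ratio of 1 means "no difference").
    if ylim_bottom < 1:
        ylim_top = max(ylim_top, 1 / ylim_bottom)
    if ylim_top > 1:
        ylim_bottom = min(ylim_bottom, 1 / ylim_top)
    return coords, ylim_bottom, ylim_top

coords, lo, hi = relative_coords([(10.0, 5.0), (100.0, 400.0)])
# coords == [(10.0, 0.5), (100.0, 4.0)], y-limits 0.25 .. 4.0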
DAAISy
DAAISy-main/dependencies/FD/experiments/issue526/v1-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue526-base", "issue526-v1"] SUITE = common_setup.DEFAULT_SATISFICING_SUITE ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) CONFIGS = [ IssueConfig( "ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"], driver_options=["--overall-time-limit", "5m"]), IssueConfig( "lama-first-lazy", [], driver_options=["--alias", "lama-first", "--overall-time-limit", "5m"]), IssueConfig( "lama-first-eager", ["--evaluator", """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))""", "--evaluator", "hff=ff_synergy(hlm)", "--search", """eager_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one)"""], driver_options=["--overall-time-limit", "5m"]), ] if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') #exp.add_absolute_report_step() exp.add_comparison_table_step() for attribute in ["memory", "total_time"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain")), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) exp.run_steps()
2,384
29.974026
94
py
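For the issue526 satisficing experiment above, IssueExperiment pairs every revision with every IssueConfig nick, so the filter_algorithm lists passed to the scatter reports refer to names such as issue526-base-ehc_ff. A small sketch of that pairing with plain strings, no lab imports required:

import itertools

revisions = ["issue526-base", "issue526-v1"]
config_nicks = ["ehc_ff", "lama-first-lazy", "lama-first-eager"]

algorithms = ["%s-%s" % (rev, nick)
              for rev, nick in itertools.product(revisions, config_nicks)]
print(algorithms)
# ['issue526-base-ehc_ff', 'issue526-base-lama-first-lazy',
#  'issue526-base-lama-first-eager', 'issue526-v1-ehc_ff', ...]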
DAAISy
DAAISy-main/dependencies/FD/experiments/issue526/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,153
35.955614
82
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue526/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue508/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,755
35.135977
79
py
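The issue508 variant above still uses the older DownwardExperiment API: *revisions* yields one (translator, preprocessor, planner) checkout triple per revision, while *search_revisions* reuses the first listed revision for translator and preprocessor and only varies the search component. A sketch of that expansion with plain tuples instead of lab checkout objects; the revision names are placeholders.

def build_combinations(revisions=None, search_revisions=None):
    # Mirrors the combination-building logic in IssueExperiment.__init__ above.
    combos = []
    for rev in revisions or []:
        combos.append((rev, rev, rev))
    if search_revisions:
        base = search_revisions[0]
        for rev in search_revisions:
            combos.append((base, base, rev))
    return combos

print(build_combinations(search_revisions=["issue508-base", "issue508-v1"]))
# [('issue508-base', 'issue508-base', 'issue508-base'),
#  ('issue508-base', 'issue508-base', 'issue508-v1')]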
DAAISy
DAAISy-main/dependencies/FD/experiments/issue508/mas.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup import configs REVS = ["issue508-base", "issue508-v1"] LIMITS = {"search_time": 1800} SUITE = suites.suite_optimal_with_ipc11() configs_optimal_core = configs.default_configs_optimal(ipc=False) CONFIGS = {} for nick in ["astar_merge_and_shrink_bisim", "astar_merge_and_shrink_greedy_bisim"]: CONFIGS[nick] = configs_optimal_core[nick] exp = common_setup.IssueExperiment( search_revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step() exp()
605
20.642857
84
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue508/configs.py
def configs_optimal_core(): return { # A* "astar_blind": [ "--search", "astar(blind)"], "astar_h2": [ "--search", "astar(hm(2))"], "astar_ipdb": [ "--search", "astar(ipdb)"], "astar_lmcount_lm_merged_rhw_hm": [ "--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], "astar_lmcut": [ "--search", "astar(lmcut)"], "astar_hmax": [ "--search", "astar(hmax)"], "astar_merge_and_shrink_bisim": [ "--search", "astar(merge_and_shrink(" "merge_strategy=merge_linear(variable_order=reverse_level)," "shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false," "group_by_h=true)))"], "astar_merge_and_shrink_greedy_bisim": [ "--search", "astar(merge_and_shrink(" "merge_strategy=merge_linear(variable_order=reverse_level)," "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1," "greedy=true,group_by_h=false)))"], "astar_merge_and_shrink_dfp_bisim": [ "--search", "astar(merge_and_shrink(merge_strategy=merge_dfp," "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1," "greedy=false,group_by_h=true)))"], "astar_selmax_lmcut_lmcount": [ "--search", "astar(selmax([lmcut(),lmcount(lm_merged([lm_hm(m=1),lm_rhw()])," "admissible=true)],training_set=1000),mpd=true)"], } def configs_satisficing_core(): return { # A* "astar_goalcount": [ "--search", "astar(goalcount)"], # eager greedy "eager_greedy_ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_add": [ "--heuristic", "h=add()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_cg": [ "--heuristic", "h=cg()", "--search", "eager_greedy(h, preferred=h)"], "eager_greedy_cea": [ "--heuristic", "h=cea()", "--search", "eager_greedy(h, preferred=h)"], # lazy greedy "lazy_greedy_ff": [ "--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_add": [ "--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"], "lazy_greedy_cg": [ "--heuristic", "h=cg()", "--search", "lazy_greedy(h, preferred=h)"], } def configs_optimal_ipc(): return { "seq_opt_merge_and_shrink": ["ipc", "seq-opt-merge-and-shrink"], "seq_opt_fdss_1": ["ipc", "seq-opt-fdss-1"], "seq_opt_fdss_2": ["ipc", "seq-opt-fdss-2"], } def configs_satisficing_ipc(): return { "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], "seq_sat_fdss_1": ["ipc", "seq-sat-fdss-1"], "seq_sat_fdss_2": ["ipc", "seq-sat-fdss-2"], } def configs_optimal_extended(): return { # A* "astar_lmcount_lm_merged_rhw_hm_no_order": [ "--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], } def configs_satisficing_extended(): return { # eager greedy "eager_greedy_alt_ff_cg": [ "--heuristic", "hff=ff()", "--heuristic", "hcg=cg()", "--search", "eager_greedy(hff,hcg,preferred=[hff,hcg])"], "eager_greedy_ff_no_pref": [ "--search", "eager_greedy(ff())"], # lazy greedy "lazy_greedy_alt_cea_cg": [ "--heuristic", "hcea=cea()", "--heuristic", "hcg=cg()", "--search", "lazy_greedy(hcea,hcg,preferred=[hcea,hcg])"], "lazy_greedy_ff_no_pref": [ "--search", "lazy_greedy(ff())"], "lazy_greedy_cea": [ "--heuristic", "h=cea()", "--search", "lazy_greedy(h, preferred=h)"], # lazy wA* "lazy_wa3_ff": [ "--heuristic", "h=ff()", "--search", "lazy_wastar(h,w=3,preferred=h)"], # eager wA* "eager_wa3_cg": [ "--heuristic", "h=cg()", "--search", "eager(single(sum([g(),weight(h,3)])),preferred=h)"], # ehc "ehc_ff": [ "--search", "ehc(ff())"], # iterated "iterated_wa_ff": [ "--heuristic", "h=ff()", "--search", "iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3)," 
"lazy_wastar(h,w=2), lazy_wastar(h,w=1)])"], # pareto open list "pareto_ff": [ "--heuristic", "h=ff()", "--search", "eager(pareto([sum([g(), h]), h]), reopen_closed=true, pathmax=false," "f_eval=sum([g(), h]))"], # bucket-based open list "bucket_lmcut": [ "--heuristic", "h=lmcut()", "--search", "eager(single_buckets(h), reopen_closed=true, pathmax=false)"], } def default_configs_optimal(core=True, ipc=True, extended=False): configs = {} if core: configs.update(configs_optimal_core()) if ipc: configs.update(configs_optimal_ipc()) if extended: configs.update(configs_optimal_extended()) return configs def default_configs_satisficing(core=True, ipc=True, extended=False): configs = {} if core: configs.update(configs_satisficing_core()) if ipc: configs.update(configs_satisficing_ipc()) if extended: configs.update(configs_satisficing_extended()) return configs
6,207
29.282927
89
py
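default_configs_optimal() and default_configs_satisficing() above simply merge the selected groups of configuration dictionaries; mas.py earlier in this listing picks two merge-and-shrink entries out of that result. A small usage sketch, assuming configs.py is importable as shown above:

import configs

# Core optimal configs only, skipping the IPC portfolio entries.
optimal = configs.default_configs_optimal(ipc=False)

# Pick a subset by nick, as mas.py does for its two merge-and-shrink runs.
selected = {nick: optimal[nick]
            for nick in ["astar_merge_and_shrink_bisim",
                         "astar_merge_and_shrink_greedy_bisim"]}
for nick, args in sorted(selected.items()):
    print("%s: %s" % (nick, args))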
DAAISy
DAAISy-main/dependencies/FD/experiments/issue701/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, MaiaEnvironment from downward import suites from common_setup import IssueConfig, IssueExperiment, is_test_run BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue701-base", "issue701-v1"] CONFIGS = [ IssueConfig( alias, [], driver_options=["--alias", alias]) for alias in [ "seq-sat-fd-autotune-1", "seq-sat-fd-autotune-2", "seq-sat-lama-2011", "seq-sat-fdss-2014"] ] SUITE = suites.suite_all() ENVIRONMENT = MaiaEnvironment( priority=-10, email="[email protected]") if is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_absolute_report_step() exp.add_comparison_table_step() exp()
962
22.487805
66
py
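The issue701 experiment above defines each IssueConfig purely through a driver alias, so the component options stay empty and the planner is started via --alias. A rough sketch of the invocation each config stands for; the planner script name and PDDL file names here are placeholders, not taken from the experiment.

aliases = [
    "seq-sat-fd-autotune-1",
    "seq-sat-fd-autotune-2",
    "seq-sat-lama-2011",
    "seq-sat-fdss-2014",
]

for alias in aliases:
    # Roughly what IssueConfig(alias, [], driver_options=["--alias", alias]) amounts to.
    cmd = ["fast-downward.py", "--alias", alias, "domain.pddl", "problem.pddl"]
    print(" ".join(cmd))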
DAAISy
DAAISy-main/dependencies/FD/experiments/issue701/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." 
+ report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step( 'publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step( "publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step(step_name, make_scatter_plots))
11,446
33.068452
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue701/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute in two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
3,921
35.654206
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v2.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_optimal_with_ipc11() configs = { IssueConfig('astar-blind', ['--search', 'astar(blind())']), IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()]))']), IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()]))']), } exp = IssueExperiment( revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-base-%s" % config.nick, "issue627-v2-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v2_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-base-%s" % config.nick, "issue627-v2-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v2_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-base', 'issue627-v2'])
1,819
32.090909
106
py
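v2.py above adds one RelativeScatterPlotReport per config for memory and another for total_time with near-identical bodies; the issue526 script earlier in this listing folds the same pattern into a loop over attributes. A condensed sketch of the loop form that only assembles the filter lists and output names (no report objects are built here):

import itertools

revisions = ["issue627-base", "issue627-v2"]
config_nicks = ["astar-blind", "astar-lmcut", "astar-ipdb",
                "astar-cegar-original", "astar-cegar-lm-goals"]
attributes = ["memory", "total_time"]

for attribute, nick in itertools.product(attributes, config_nicks):
    filter_config = ["%s-%s" % (rev, nick) for rev in revisions]
    outfile = "issue627_base_v2_%s_%s.png" % (attribute, nick)
    print("%s -> %s" % (filter_config, outfile))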
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/merge-v3-v5.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir from relativescatter import RelativeScatterPlotReport import os def main(revisions=None): exp = IssueExperiment(benchmarks_dir=".", suite=[]) exp.add_fetcher( os.path.join(get_script_dir(), "data", "issue627-v3-eval"), filter=lambda(run): "base" not in run["config"], ) exp.add_fetcher( os.path.join(get_script_dir(), "data", "issue627-v5-eval"), filter=lambda(run): "base" not in run["config"], ) for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb', 'astar-cegar-original', 'astar-cegar-lm-goals']: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-v3-%s" % config_nick, "issue627-v5-%s" % config_nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_v3_v5_memory_%s.png' % config_nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-v3-%s" % config_nick, "issue627-v5-%s" % config_nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_v3_v5_total_time_%s.png' % config_nick ) exp() main(revisions=['issue627-v3', 'issue627-v5'])
1,574
32.510638
116
py
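merge-v3-v5.py above filters fetched runs with lambda(run): "base" not in run["config"], which relies on Python 2's tuple-parameter syntax and would be a syntax error under Python 3. A small sketch of an equivalent filter written as a plain function; the sample run dictionaries are invented for illustration.

def keep_non_base(run):
    # Equivalent to the fetcher filter above: drop runs whose config name
    # contains "base".
    return "base" not in run["config"]

runs = [
    {"config": "issue627-base-astar-blind"},
    {"config": "issue627-v3-astar-blind"},
]
print([run["config"] for run in runs if keep_non_base(run)])
# -> ['issue627-v3-astar-blind']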
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/common_setup_no_benchmarks.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=None, **kwargs): """ If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(benchmarks_dir, suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,496
33.907821
83
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v5-sat.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup_no_benchmarks import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_satisficing_with_ipc11() configs = { IssueConfig('lazy-greedy-ff', [ '--heuristic', 'h=ff()', '--search', 'lazy_greedy(h, preferred=h)' ]), IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first'] ), IssueConfig('eager_greedy_cg', [ '--heuristic', 'h=cg()', '--search', 'eager_greedy(h, preferred=h)' ]), IssueConfig('eager_greedy_cea', [ '--heuristic', 'h=cea()', '--search', 'eager_greedy(h, preferred=h)' ]), } exp = IssueExperiment( benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v5-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v5_sat_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v5-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v5_sat_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-v3-base', 'issue627-v5'])
2,128
28.569444
74
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/merge-v3-v4.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir from relativescatter import RelativeScatterPlotReport import os def main(revisions=None): exp = IssueExperiment(benchmarks_dir=".", suite=[]) exp.add_fetcher( os.path.join(get_script_dir(), "data", "issue627-v3-eval"), filter=lambda(run): "base" not in run["config"], ) exp.add_fetcher( os.path.join(get_script_dir(), "data", "issue627-v4-eval"), filter=lambda(run): "base" not in run["config"], ) for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb', 'astar-cegar-original', 'astar-cegar-lm-goals']: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-v3-%s" % config_nick, "issue627-v4-%s" % config_nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_v3_v4_memory_%s.png' % config_nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-v3-%s" % config_nick, "issue627-v4-%s" % config_nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_v3_v4_total_time_%s.png' % config_nick ) exp() main(revisions=['issue627-v3', 'issue627-v4'])
1,574
32.510638
116
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v4.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup_no_benchmarks import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_optimal_with_ipc11() configs = { IssueConfig('astar-blind', ['--search', 'astar(blind())']), IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']), IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']), } exp = IssueExperiment( benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v4-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v4_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v4-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v4_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-v3-base', 'issue627-v4'])
1,988
34.517857
143
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_optimal_with_ipc11() configs = { IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()]))']), IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()]))']), } exp = IssueExperiment( revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-base-%s" % config.nick, "issue627-v1-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v1_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-base-%s" % config.nick, "issue627-v1-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v1_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-base', 'issue627-v1'])
1,621
30.192308
106
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v1-noise.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_optimal_with_ipc11() configs = { IssueConfig('astar-cegar-original-10000', ['--search', 'astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))']), IssueConfig('astar-cegar-lm-goals-10000', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()],max_states=10000,max_time=infinity))']), } exp = IssueExperiment( revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-base-%s" % config.nick, "4ed2abfab4ba-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-base-%s" % config.nick, "4ed2abfab4ba-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-base', '4ed2abfab4ba'])
1,696
31.634615
147
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareConfigsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. 
""" def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Wrapper for FastDownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, suite, revisions=[], configs={}, grid_priority=None, path=None, test_suite=None, email=None, processes=1, **kwargs): """Create a DownwardExperiment with some convenience features. If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) *configs* must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(..., suite=suites.suite_all()) IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(..., suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(..., grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) If *email* is specified, it should be an email address. This email address will be notified upon completion of the experiments if it is run on the cluster. 
""" if is_test_run(): kwargs["environment"] = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment( priority=grid_priority, email=email) path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) repo = get_repo_base() for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), repo, rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self.add_suite(os.path.join(repo, "benchmarks"), suite) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join(self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = CompareConfigsReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + "." + report.output_format) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare" % (self.name, rev1, rev2) + ".html") subprocess.call(['publish', outfile]) self.add_step(Step("make-comparison-tables", make_comparison_tables)) self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,539
34.027933
83
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v3.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup_no_benchmarks import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_optimal_with_ipc11() configs = { IssueConfig('astar-blind', ['--search', 'astar(blind())']), IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']), IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']), } exp = IssueExperiment( benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v3-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v3_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v3-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v3_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-v3-base', 'issue627-v3'])
1,988
34.517857
143
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/relativescatter.py
# -*- coding: utf-8 -*- # # downward uses the lab package to conduct experiments with the # Fast Downward planning system. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from collections import defaultdict import os from lab import tools from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter(axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute in two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. 
# Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
4,690
35.937008
84
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v5.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup_no_benchmarks import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_optimal_with_ipc11() configs = { IssueConfig('astar-blind', ['--search', 'astar(blind())']), IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']), IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']), } exp = IssueExperiment( benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v5-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v5_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-v3-base-%s" % config.nick, "issue627-v5-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v5_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-v3-base', 'issue627-v5'])
1,988
34.517857
143
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue627/v1-limit.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport def main(revisions=None): suite = suites.suite_optimal_with_ipc11() configs = { IssueConfig('astar-cegar-original-10000', ['--search', 'astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))']), IssueConfig('astar-cegar-lm-goals-10000', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()],max_states=10000,max_time=infinity))']), } exp = IssueExperiment( revisions=revisions, configs=configs, suite=suite, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_comparison_table_step() for config in configs: exp.add_report( RelativeScatterPlotReport( attributes=["memory"], filter_config=["issue627-base-%s" % config.nick, "issue627-v1-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v1_memory_%s.png' % config.nick ) exp.add_report( RelativeScatterPlotReport( attributes=["total_time"], filter_config=["issue627-base-%s" % config.nick, "issue627-v1-%s" % config.nick], get_category=lambda run1, run2: run1.get("domain"), ), outfile='issue627_base_v1_total_time_%s.png' % config.nick ) exp() main(revisions=['issue627-base', 'issue627-v1'])
1,699
31.692308
147
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue794/v2-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue794-base", "issue794-v2"] CONFIGS = [ IssueConfig('blind', ['--search', 'astar(blind())']), ] SUITE = [ 'assembly', 'miconic-fulladl', 'openstacks', 'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'trucks', ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser("axiom_time_parser.py") exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"]) for attribute in ["axiom_time_inner", "axiom_time_outer"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) ) exp.run_steps()
1,948
29.936508
113
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue794/v1-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue794-base", "issue794-v1"] CONFIGS = [ IssueConfig('blind', ['--search', 'astar(blind())']), ] SUITE = [ 'assembly', 'miconic-fulladl', 'openstacks', 'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'trucks', ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser("axiom_time_parser.py") exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"]) for attribute in ["axiom_time_inner", "axiom_time_outer"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) ) exp.run_steps()
1,948
29.936508
113
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue794/v4-opt.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment from relativescatter import RelativeScatterPlotReport DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue794-v1", "issue794-v4"] CONFIGS = [ IssueConfig('blind', ['--search', 'astar(blind())']), ] SUITE = [ 'assembly', 'miconic-fulladl', 'openstacks', 'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers', 'psr-large', 'psr-middle', 'trucks', ] ENVIRONMENT = BaselSlurmEnvironment( partition="infai_1", email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"]) if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_parser(exp.EXITCODE_PARSER) exp.add_parser(exp.TRANSLATOR_PARSER) exp.add_parser(exp.SINGLE_SEARCH_PARSER) exp.add_parser(exp.PLANNER_PARSER) exp.add_parser("axiom_time_parser.py") exp.add_step('build', exp.build) exp.add_step('start', exp.start_runs) exp.add_fetcher(name='fetch') exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"]) for attribute in ["axiom_time_inner", "axiom_time_outer"]: for config in CONFIGS: exp.add_report( RelativeScatterPlotReport( attributes=[attribute], filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], get_category=lambda run1, run2: run1.get("domain"), ), outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) ) exp.run_steps()
1,946
29.904762
113
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue794/axiom_time_parser.py
#! /usr/bin/env python from lab.parser import Parser print 'Running axiom evaluation time parser' parser = Parser() parser.add_pattern('axiom_time_inner', r'AxiomEvaluator time in inner evaluate: (.+)', type=float) parser.add_pattern('axiom_time_outer', r'AxiomEvaluator time in outer evaluate: (.+)', type=float) parser.parse()
332
29.272727
98
py
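The two patterns registered in axiom_time_parser.py above extract floating-point timings from the planner log. As a quick illustration of what they match, the following standalone snippet applies the same regular expressions with the standard re module; the sample log lines are fabricated to fit the patterns and are not actual planner output.

import re

patterns = {
    "axiom_time_inner": r"AxiomEvaluator time in inner evaluate: (.+)",
    "axiom_time_outer": r"AxiomEvaluator time in outer evaluate: (.+)",
}

# Made-up log excerpt shaped to match the patterns above.
log = ("AxiomEvaluator time in inner evaluate: 0.25\n"
       "AxiomEvaluator time in outer evaluate: 0.75\n")

props = {}
for name, pattern in patterns.items():
    match = re.search(pattern, log)
    if match:
        props[name] = float(match.group(1))

for name in sorted(props):
    print("%s = %s" % (name, props[name]))
# axiom_time_inner = 0.25
# axiom_time_outer = 0.75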
DAAISy
DAAISy-main/dependencies/FD/experiments/issue794/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,153
35.955614
82
py
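The common_setup.py variant above adds a relative keyword to add_scatter_plot_step, so experiment scripts can request relative scatter plots without constructing RelativeScatterPlotReport by hand (as the v*-opt.py scripts in this directory do). Below is a hypothetical script fragment using that keyword; it assumes the same lab/downward installation and repository layout that the other issue794 scripts rely on, and omits the suite, parser, and fetcher setup those scripts perform.

from common_setup import IssueConfig, IssueExperiment

exp = IssueExperiment(
    revisions=["issue794-base", "issue794-v1"],
    configs=[IssueConfig("blind", ["--search", "astar(blind())"])],
)
# Same intent as the hand-rolled RelativeScatterPlotReport calls in the
# surrounding scripts, routed through the convenience method instead.
exp.add_scatter_plot_step(relative=True, attributes=["total_time"])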
DAAISy
DAAISy-main/dependencies/FD/experiments/issue794/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
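The relativescatter.py row above plots, for each task, the first algorithm's attribute value on the x-axis and the ratio of the second algorithm's value to the first on the y-axis. The following standalone sketch is an illustration only: the paired run data is invented and plain Python is used instead of the lab/downward report classes. It shows the relative computation and the symmetric y-limits centered around 1 that the report uses.

# Minimal sketch of the relative comparison in RelativeScatterPlotReport:
# x = value of algorithm 1, y = value of algorithm 2 / value of algorithm 1.
# The paired_runs data below is invented purely for illustration.
paired_runs = [
    ({"algorithm": "base-blind", "total_time": 2.0},
     {"algorithm": "v3-blind", "total_time": 1.0}),
    ({"algorithm": "base-blind", "total_time": 0.5},
     {"algorithm": "v3-blind", "total_time": 2.0}),
]

points = []
ylim_bottom, ylim_top = 2, 0.5
for run1, run2 in paired_runs:
    val1, val2 = run1["total_time"], run2["total_time"]
    if val1 is None or val2 is None or val1 <= 0 or val2 <= 0:
        continue  # the report skips pairs it cannot compare
    x, y = val1, val2 / float(val1)
    points.append((x, y))
    ylim_top = max(ylim_top, y)
    ylim_bottom = min(ylim_bottom, y)

# Center the y-range around 1 so speedups and slowdowns look symmetric.
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))

print(points, ylim_bottom, ylim_top)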
DAAISy
DAAISy-main/dependencies/FD/experiments/issue794/v3-opt.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue794-base", "issue794-v3"]
CONFIGS = [
    IssueConfig('blind', ['--search', 'astar(blind())']),
]
SUITE = [
    'assembly', 'miconic-fulladl', 'openstacks',
    'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers',
    'psr-large', 'psr-middle', 'trucks',
]
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_1",
    email="[email protected]",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser("axiom_time_parser.py")

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"])

for attribute in ["axiom_time_inner", "axiom_time_outer"]:
    for config in CONFIGS:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
        )

exp.run_steps()
1,948
29.936508
113
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue722/v1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.reports import Attribute, geometric_mean from lab.environments import LocalEnvironment, MaiaEnvironment import common_setup from common_setup import IssueConfig, IssueExperiment, get_algo_nick, get_repo_base BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue722-base", "issue722-v1"] CONFIGS = [ IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), IssueConfig('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), #IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), #IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), #IssueConfig('sccs-dfp-ginf', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), #IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), #IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), #IssueConfig('sccs-dfp-f50k', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000))']), ] SUITE = common_setup.DEFAULT_OPTIMAL_SUITE ENVIRONMENT = MaiaEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=4) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['{ms_parser}']) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step() exp.add_scatter_plot_step() exp.run_steps()
5,364
68.675325
465
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue722/ms-parser.py
#! /usr/bin/env python

from lab.parser import Parser

parser = Parser()
parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)
parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int)
parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float)

def check_ms_constructed(content, props):
    ms_construction_time = props.get('ms_construction_time')
    abstraction_constructed = False
    if ms_construction_time is not None:
        abstraction_constructed = True
    props['ms_abstraction_constructed'] = abstraction_constructed

parser.add_function(check_ms_constructed)

def check_planner_exit_reason(content, props):
    ms_abstraction_constructed = props.get('ms_abstraction_constructed')
    error = props.get('error')
    if error != 'none' and error != 'timeout' and error != 'out-of-memory':
        print 'error: %s' % error
        return

    # Check whether merge-and-shrink computation or search ran out of
    # time or memory.
    ms_out_of_time = False
    ms_out_of_memory = False
    search_out_of_time = False
    search_out_of_memory = False
    if ms_abstraction_constructed == False:
        if error == 'timeout':
            ms_out_of_time = True
        elif error == 'out-of-memory':
            ms_out_of_memory = True
    elif ms_abstraction_constructed == True:
        if error == 'timeout':
            search_out_of_time = True
        elif error == 'out-of-memory':
            search_out_of_memory = True
    props['ms_out_of_time'] = ms_out_of_time
    props['ms_out_of_memory'] = ms_out_of_memory
    props['search_out_of_time'] = search_out_of_time
    props['search_out_of_memory'] = search_out_of_memory

parser.add_function(check_planner_exit_reason)

def check_perfect_heuristic(content, props):
    plan_length = props.get('plan_length')
    expansions = props.get('expansions')
    if plan_length != None:
        perfect_heuristic = False
        if plan_length + 1 == expansions:
            perfect_heuristic = True
        props['perfect_heuristic'] = perfect_heuristic

parser.add_function(check_perfect_heuristic)

def check_proved_unsolvability(content, props):
    proved_unsolvability = False
    if props['coverage'] == 0:
        for line in content.splitlines():
            if line == 'Completely explored state space -- no solution!':
                proved_unsolvability = True
                break
    props['proved_unsolvability'] = proved_unsolvability

parser.add_function(check_proved_unsolvability)

parser.parse()
2,784
37.150685
135
py
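The ms-parser.py row above wires regular-expression patterns and post-processing functions into lab's Parser. As a hedged illustration only, the sketch below uses plain re instead of lab, and the sample log line is invented; it shows how one of those patterns would turn a matching log line into the properties that the parser functions then consume.

import re

# Pattern copied from the ms-parser.py row above; the sample log line is
# invented purely for illustration.
pattern = re.compile(r'Done initializing merge-and-shrink heuristic \[(.+)s\]')
sample_log = "Done initializing merge-and-shrink heuristic [12.34s]"

props = {}
match = pattern.search(sample_log)
if match:
    # lab would store this under 'ms_construction_time' with type=float.
    props['ms_construction_time'] = float(match.group(1))

# Mirrors check_ms_constructed(): the abstraction counts as constructed
# exactly when a construction time could be parsed from the log.
props['ms_abstraction_constructed'] = props.get('ms_construction_time') is not None
print(props)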
DAAISy
DAAISy-main/dependencies/FD/experiments/issue722/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport try: from relativescatter import RelativeScatterPlotReport matplotlib = True except ImportError: print 'matplotlib not availabe, scatter plots not available' matplotlib = False def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main 
script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if matplotlib: if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,453
35.872449
81
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue722/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue662/v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment, get_algo_nick, get_repo_base
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue662-base", "issue662-v1"]
CONFIGS = [
    IssueConfig(
        'astar-lmcut-static',
        ['--search', 'astar(lmcut())'],
        build_options=["release32"],
        driver_options=["--build=release32", "--search-time-limit", "60s"]
    ),
    IssueConfig(
        'astar-lmcut-dynamic',
        ['--search', 'astar(lmcut())'],
        build_options=["release32dynamic"],
        driver_options=["--build=release32dynamic", "--search-time-limit", "60s"]
    )
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=[],
    configs=[],
    environment=ENVIRONMENT,
)
for rev in REVISIONS:
    for config in CONFIGS:
        if rev.endswith("base") and config.nick.endswith("dynamic"):
            continue
        exp.add_algorithm(
            get_algo_nick(rev, config.nick),
            get_repo_base(),
            rev,
            config.component_options,
            build_options=config.build_options,
            driver_options=config.driver_options)

exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attribute in ["total_time"]:
    for algo1, algo2 in [
            ("issue662-base-astar-lmcut-static", "issue662-v1-astar-lmcut-static"),
            ("issue662-v1-astar-lmcut-static", "issue662-v1-astar-lmcut-dynamic")]:
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attribute],
                filter_algorithm=[algo1, algo2],
                get_category=lambda run1, run2: run1.get("domain"),
            ),
            outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, algo1, algo2)
        )

exp.run_steps()
2,394
29.705128
95
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue662/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. :: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. 
Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, outfile=outfile) self.add_step( 'publish-absolute-report', subprocess.call, ['publish', outfile]) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.%s" % ( self.name, rev1, rev2, report.output_format)) report(self.eval_dir, outfile) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) subprocess.call(["publish", outfile]) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step( "publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,171
35.715026
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue662/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue528/issue528-v3.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import configs, suites
from downward.reports.scatter import ScatterPlotReport

import common_setup


SEARCH_REVS = ["issue528-base", "issue528-v3"]
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    "astar_lmcut": ["--search", "astar(lmcut())"]
}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attr in ("memory", "total_time"):
    exp.add_report(
        ScatterPlotReport(
            attributes=[attr],
            filter_config=[
                "issue528-base-astar_lmcut",
                "issue528-v3-astar_lmcut",
            ],
        ),
        outfile='issue528_base_v3_%s.png' % attr
    )

exp()
810
19.794872
54
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue528/issue528.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import configs, suites
from downward.reports.scatter import ScatterPlotReport

import common_setup


SEARCH_REVS = ["issue528-base", "issue528-v1", "issue528-v2"]
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    "astar_lmcut": ["--search", "astar(lmcut())"]
}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
    )

exp.add_absolute_report_step()
exp.add_comparison_table_step()

for attr in ("memory", "total_time"):
    exp.add_report(
        ScatterPlotReport(
            attributes=[attr],
            filter_config=[
                "issue528-base-astar_lmcut",
                "issue528-v2-astar_lmcut",
            ],
        ),
        outfile='issue528_base_v2_%s.png' % attr
    )

exp()
825
20.179487
61
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue528/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.reports import Table from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports import PlanningReport from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. 
:: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. 
kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots)) class RegressionReport(PlanningReport): """ Compare revisions for tasks on which the first revision performs better than other revisions. *revision_nicks* must be a list of revision_nicks, e.g. ["default", "issue123"]. *config_nicks* must be a list of configuration nicknames, e.g. ["eager_greedy_ff", "eager_greedy_add"]. *regression_attribute* is the attribute that we compare between different revisions. It defaults to "coverage". Example comparing search_time for tasks were we lose coverage:: exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], config_nicks=["eager_greedy_ff"], regression_attribute="coverage", attributes="search_time")) """ def __init__(self, revision_nicks, config_nicks, regression_attribute="coverage", **kwargs): PlanningReport.__init__(self, **kwargs) assert revision_nicks self.revision_nicks = revision_nicks assert config_nicks self.config_nicks = config_nicks self.regression_attribute = regression_attribute def get_markup(self): tables = [] for (domain, problem) in self.problems: for config_nick in self.config_nicks: runs = [self.runs[(domain, problem, rev + "-" + config_nick)] for rev in self.revision_nicks] if any(runs[0][self.regression_attribute] > runs[i][self.regression_attribute] for i in range(1, len(self.revision_nicks))): print "\"%s:%s\"," % (domain, problem) table = Table() for rev, run in zip(self.revision_nicks, runs): for attr in self.attributes: table.add_cell(rev, attr, run.get(attr)) table_name = ":".join((domain, problem, config_nick)) tables.append((table_name, table)) return "\n".join(name + "\n" + str(table) for name, table in tables)
14,920
36.3025
79
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue555/issue555-v2.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup


REVS = ["issue555-base", "issue555-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    'astar_h2': [
        '--search',
        'astar(hm(2))'],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
    )

exp.add_comparison_table_step()

exp()
467
16.333333
41
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue555/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,755
35.135977
79
py
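As a quick orientation for the wrapper above, here is a minimal sketch (not part of the repository) of the filename scheme used by add_comparison_table_step(): one CompareRevisionsReport per revision pair, written into the evaluation directory. The experiment name, eval directory and revision nicks are placeholder assumptions.

# Illustrative sketch only; names below are assumptions, not repository values.
import itertools
import os

exp_name = "issue555-issue555-v1"                  # hypothetical experiment name
eval_dir = "data/issue555-issue555-v1-eval"        # hypothetical eval directory
revision_nicks = ["issue555-base", "issue555-v1"]  # hypothetical revision nicks

for rev1, rev2 in itertools.combinations(revision_nicks, 2):
    outfile = os.path.join(
        eval_dir, "%s-%s-%s-compare.html" % (exp_name, rev1, rev2))
    print(outfile)
# -> data/issue555-issue555-v1-eval/issue555-issue555-v1-issue555-base-issue555-v1-compare.html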
DAAISy
DAAISy-main/dependencies/FD/experiments/issue555/issue555-v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

import common_setup


REVS = ["issue555-base", "issue555-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    'astar_h2': ['--search', 'astar(hm(2))'],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    limits=LIMITS,
    )

exp.add_comparison_table_step()

exp()
467
16.333333
41
py
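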
DAAISy
DAAISy-main/dependencies/FD/experiments/issue533/exp1.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites import common_setup CONFIGS = { "blind": [ "--search", "astar(blind)"], "ff": [ "--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"], } REVS = ["issue533-base", "issue533-v1", "issue533-v1-debug"] LIMITS = {"search_time": 300} # We define a suite that consists of (really) all domains because for # translator issues like this one, it's interesting what we do in # obscure cases like airport-adl. The following is simply a list of # all domains that were in the benchmarks directory at the time of # this writing. SUITE = [ "airport", "airport-adl", "assembly", "barman-opt11-strips", "barman-sat11-strips", "blocks", "depot", "driverlog", "elevators-opt08-strips", "elevators-opt11-strips", "elevators-sat08-strips", "elevators-sat11-strips", "floortile-opt11-strips", "floortile-sat11-strips", "freecell", "grid", "gripper", "logistics00", "logistics98", "miconic", "miconic-fulladl", "miconic-simpleadl", "movie", "mprime", "mystery", "no-mprime", "no-mystery", "nomystery-opt11-strips", "nomystery-sat11-strips", "openstacks", "openstacks-opt08-adl", "openstacks-opt08-strips", "openstacks-opt11-strips", "openstacks-sat08-adl", "openstacks-sat08-strips", "openstacks-sat11-strips", "openstacks-strips", "optical-telegraphs", "parcprinter-08-strips", "parcprinter-opt11-strips", "parcprinter-sat11-strips", "parking-opt11-strips", "parking-sat11-strips", "pathways", "pathways-noneg", "pegsol-08-strips", "pegsol-opt11-strips", "pegsol-sat11-strips", "philosophers", "pipesworld-notankage", "pipesworld-tankage", "psr-large", "psr-middle", "psr-small", "rovers", "satellite", "scanalyzer-08-strips", "scanalyzer-opt11-strips", "scanalyzer-sat11-strips", "schedule", "sokoban-opt08-strips", "sokoban-opt11-strips", "sokoban-sat08-strips", "sokoban-sat11-strips", "storage", "tidybot-opt11-strips", "tidybot-sat11-strips", "tpp", "transport-opt08-strips", "transport-opt11-strips", "transport-sat08-strips", "transport-sat11-strips", "trucks", "trucks-strips", "visitall-opt11-strips", "visitall-sat11-strips", "woodworking-opt08-strips", "woodworking-opt11-strips", "woodworking-sat08-strips", "woodworking-sat11-strips", "zenotravel", ] exp = common_setup.IssueExperiment( revisions=REVS, configs=CONFIGS, suite=SUITE, limits=LIMITS, ) exp.add_comparison_table_step( attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["translate_*", "translator_*"]) exp()
2,861
21.896
69
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue533/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,755
35.135977
79
py
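To make the constructor's behaviour above concrete, the following sketch mimics how *revisions* and *search_revisions* are turned into (Translator, Preprocessor, Planner) checkouts. It only prints revision names instead of building the real checkout objects; the revision list is the one used in exp1.py.

# Illustrative sketch: strings stand in for Translator/Preprocessor/Planner objects.
revisions = ["issue533-base", "issue533-v1", "issue533-v1-debug"]

# With *revisions*, every component comes from the same revision:
combos = [(rev, rev, rev) for rev in revisions]

# With *search_revisions*, translator and preprocessor are pinned to the first
# revision and only the search component varies:
search_revisions = ["issue533-base", "issue533-v1"]
base_rev = search_revisions[0]
combos += [(base_rev, base_rev, rev) for rev in search_revisions]

for translator_rev, preprocessor_rev, planner_rev in combos:
    print(translator_rev, preprocessor_rev, planner_rev)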
DAAISy
DAAISy-main/dependencies/FD/experiments/issue582/v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites

from relativescatter import RelativeScatterPlotReport
import common_setup


REVS = ["issue582-base", "issue582-v1"]
SUITE = suites.suite_optimal_with_ipc11()

CONFIGS = {
    "astar_lmcut": ["--search", "astar(lmcut())"],
}

exp = common_setup.IssueExperiment(
    search_revisions=REVS,
    configs=CONFIGS,
    suite=SUITE,
    )

exp.add_report(
    RelativeScatterPlotReport(
        attributes=["total_time"],
        get_category=lambda run1, run2: run1.get("domain"),
        ),
    outfile='issue582_base_v1_total_time.png')

exp.add_comparison_table_step()

exp()
663
17.971429
59
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue582/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. :: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. 
The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. 
return [_get_rev_nick(*combo) for combo in self.combinations] @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in self.get_supported_attributes( config_nick, attributes): make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
12,856
34.913408
79
py
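A small sketch of the attribute filtering performed by get_supported_attributes() above. "seq-sat-fdss-1" is a hypothetical portfolio nick (it contains "fdss", so _is_portfolio() is true); "astar_lmcut" is the non-portfolio config from v1.py.

# Standalone illustration of the filtering logic in common_setup.py above.
PORTFOLIO_ATTRIBUTES = ["cost", "coverage", "error", "plan_length", "run_dir"]
attributes = ["coverage", "expansions", "plan_length", "total_time"]

def is_portfolio(config_nick):
    return "fdss" in config_nick

def get_supported_attributes(config_nick, attributes):
    if is_portfolio(config_nick):
        return [attr for attr in attributes if attr in PORTFOLIO_ATTRIBUTES]
    return attributes

print(get_supported_attributes("seq-sat-fdss-1", attributes))  # ['coverage', 'plan_length']
print(get_supported_attributes("astar_lmcut", attributes))     # all four attributes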
DAAISy
DAAISy-main/dependencies/FD/experiments/issue582/relativescatter.py
# -*- coding: utf-8 -*- # # downward uses the lab package to conduct experiments with the # Fast Downward planning system. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from collections import defaultdict import os from lab import tools from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter(axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows how a specific attribute in two configurations. The attribute value in config 1 is shown on the x-axis and the relation to the value in config 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. 
# Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['config'] == self.configs[0] and run2['config'] == self.configs[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.configs[0], val1) assert val2 > 0, (domain, problem, self.configs[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlots use log-scaling on the x-axis by default. default_xscale = 'log' if self.attribute and self.attribute in self.LINEAR: default_xscale = 'linear' PlotReport._set_scales(self, xscale or default_xscale, 'log')
4,690
35.937008
84
py
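A worked example of the coordinates computed in _fill_categories() above: the x-coordinate is the attribute value of config 1 and the y-coordinate is the ratio config2/config1, after which the y-limits are widened so the plot is centered around 1. The numbers are invented for illustration.

# Worked example of the relative-scatter arithmetic (values are made up).
val1 = 10.0   # e.g. total_time of issue582-base on some task
val2 = 25.0   # e.g. total_time of issue582-v1 on the same task
x = val1
y = val2 / float(val1)
print(x, y)   # (10.0, 2.5): v1 is 2.5 times slower on this task

# Mirror of the "center around 1" block above.
ylim_bottom, ylim_top = 0.5, 2.5
if ylim_bottom < 1:
    ylim_top = max(ylim_top, 1 / float(ylim_bottom))
if ylim_top > 1:
    ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))
print(ylim_bottom, ylim_top)   # (0.4, 2.5)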
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.reports import Attribute, arithmetic_mean, finite_sum, geometric_mean

import common_setup
from common_setup import IssueConfig, IssueExperiment

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue732-base", "issue732-v1"]
CONFIGS = [
    IssueConfig(
        'astar-inf',
        ['--search', 'astar(const(infinity))'],
    ),
    IssueConfig(
        'astar-blind',
        ['--search', 'astar(blind())'],
    ),
    IssueConfig(
        'debug-astar-inf',
        ['--search', 'astar(const(infinity))'],
        build_options=["debug32"],
        driver_options=["--build=debug32"],
    ),
    IssueConfig(
        'debug-astar-blind',
        ['--search', 'astar(blind())'],
        build_options=["debug32"],
        driver_options=["--build=debug32"],
    ),
]
SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) |
                    set(common_setup.DEFAULT_SATISFICING_SUITE)))
ENVIRONMENT = MaiaEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])

attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
    Attribute("sg_construction_time", functions=[finite_sum], min_wins=True),
    Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True),
    "error",
    "run_dir",
]

exp.add_absolute_report_step(attributes=attributes)

exp.run_steps()
1,864
27.257576
78
py
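For orientation, this sketch lists the algorithm names the absolute report of v1.py will contain, following the "{revision}-{config_nick}" scheme of get_algo_nick() in this issue's common_setup.py (included later in this dump). Purely illustrative.

# Illustrative only: 2 revisions x 4 configs = 8 algorithm names.
revisions = ["issue732-base", "issue732-v1"]
config_nicks = ["astar-inf", "astar-blind", "debug-astar-inf", "debug-astar-blind"]

for rev in revisions:
    for nick in config_nicks:
        print("{rev}-{nick}".format(rev=rev, nick=nick))
# e.g. "issue732-base-astar-inf", ..., "issue732-v1-debug-astar-blind"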
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/sg-parser.py
#! /usr/bin/env python

from lab.parser import Parser

parser = Parser()
parser.add_pattern('sg_construction_time', 'time for root successor generation creation: (.+)s', type=float)
parser.add_pattern('sg_peak_mem_diff', 'peak memory difference for root successor generator creation: (\d+) KB', type=int)
parser.parse()
322
28.363636
122
py
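To see what sg-parser.py extracts, the following sketch applies the two patterns above to example log lines. The line formats follow the regular expressions; the concrete numbers are invented.

# Illustrative only: example planner log lines and the values the parser yields.
import re

log = """\
time for root successor generation creation: 0.0153s
peak memory difference for root successor generator creation: 3164 KB
"""

print(float(re.search(
    r'time for root successor generation creation: (.+)s', log).group(1)))
# -> 0.0153  (stored as sg_construction_time)
print(int(re.search(
    r'peak memory difference for root successor generator creation: (\d+) KB',
    log).group(1)))
# -> 3164  (stored as sg_peak_mem_diff)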
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/v6-debug.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, finite_sum import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue732-base", "issue732-v6"] BUILDS = ["debug32", "release32"] CONFIGS = [ IssueConfig( "lama-first-{build}".format(**locals()), [], build_options=[build], driver_options=["--alias", "lama-first", "--build", build]) for build in BUILDS ] SUITE = set( common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) ENVIRONMENT = BaselSlurmEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') exp.add_command('sg-parser', ['{sg_parser}']) attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ Attribute("sg_construction_time", functions=[finite_sum], min_wins=True), Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), ] exp.add_comparison_table_step(attributes=attributes) exp.run_steps()
1,487
28.176471
80
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.experiment import ARGPARSER from lab import tools from downward.experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import ComparativeReport from downward.reports.scatter import ScatterPlotReport from relativescatter import RelativeScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() DEFAULT_OPTIMAL_SUITE = [ 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 'depot', 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', 'openstacks-opt11-strips', 'openstacks-opt14-strips', 'openstacks-strips', 'parcprinter-08-strips', 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 'transport-opt11-strips', 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', 'woodworking-opt08-strips', 'woodworking-opt11-strips', 'zenotravel'] DEFAULT_SATISFICING_SUITE = [ 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', 'mystery', 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', 'openstacks-sat08-strips', 'openstacks-sat11-strips', 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', 'parcprinter-sat11-strips', 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', 'tetris-sat14-strips', 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] def get_script(): """Get file name of main script.""" return tools.get_script_path() def get_script_dir(): """Get directory of main script. 
Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ( "cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_running_on_cluster_login_node(): return platform.node() == "login20.cluster.bc2.ch" def can_publish(): return is_running_on_cluster_login_node() or not is_running_on_cluster() def publish(report_file): if can_publish(): subprocess.call(["publish", report_file]) else: print "publishing reports is not supported on this node" def is_test_run(): return ARGS.test_run == "yes" or ( ARGS.test_run == "auto" and not is_running_on_cluster()) def get_algo_nick(revision, config_nick): return "{revision}-{config_nick}".format(**locals()) class IssueConfig(object): """Hold information about a planner configuration. See FastDownwardExperiment.add_algorithm() for documentation of the constructor's options. """ def __init__(self, nick, component_options, build_options=None, driver_options=None): self.nick = nick self.component_options = component_options self.build_options = build_options self.driver_options = driver_options class IssueExperiment(FastDownwardExperiment): """Subclass of FastDownwardExperiment with some convenience features.""" DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "error", "plan_length", "run_dir", ] def __init__(self, revisions=None, configs=None, path=None, **kwargs): """ You can either specify both *revisions* and *configs* or none of them. If they are omitted, you will need to call exp.add_algorithm() manually. If *revisions* is given, it must be a non-empty list of revision identifiers, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"], ...) If *configs* is given, it must be a non-empty list of IssueConfig objects. 
:: IssueExperiment(..., configs=[ IssueConfig("ff", ["--search", "eager_greedy(ff())"]), IssueConfig( "lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), ]) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ """ path = path or get_data_dir() FastDownwardExperiment.__init__(self, path=path, **kwargs) if (revisions and not configs) or (not revisions and configs): raise ValueError( "please provide either both or none of revisions and configs") for rev in revisions: for config in configs: self.add_algorithm( get_algo_nick(rev, config.nick), get_repo_base(), rev, config.component_options, build_options=config.build_options, driver_options=config.driver_options) self._revisions = revisions self._configs = configs @classmethod def _is_portfolio(cls, config_nick): return "fdss" in config_nick @classmethod def get_supported_attributes(cls, config_nick, attributes): if cls._is_portfolio(config_nick): return [attr for attr in attributes if attr in cls.PORTFOLIO_ATTRIBUTES] return attributes def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = os.path.join( self.eval_dir, get_experiment_name() + "." + report.output_format) self.add_report(report, name="make-absolute-report", outfile=outfile) self.add_step("publish-absolute-report", publish, outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revisions. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareConfigsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def get_revision_pairs_and_files(): for rev1, rev2 in itertools.combinations(self._revisions, 2): outfile = os.path.join( self.eval_dir, "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) yield (rev1, rev2, outfile) def make_comparison_tables(): for rev1, rev2, outfile in get_revision_pairs_and_files(): compared_configs = [] for config in self._configs: config_nick = config.nick compared_configs.append( ("%s-%s" % (rev1, config_nick), "%s-%s" % (rev2, config_nick), "Diff (%s)" % config_nick)) report = ComparativeReport(compared_configs, **kwargs) report(self.eval_dir, outfile) def publish_comparison_tables(): for _, _, outfile in get_revision_pairs_and_files(): publish(outfile) self.add_step("make-comparison-tables", make_comparison_tables) self.add_step("publish-comparison-tables", publish_comparison_tables) def add_scatter_plot_step(self, relative=False, attributes=None): """Add step creating (relative) scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revisions pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if relative: report_class = RelativeScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-relative") step_name = "make-relative-scatter-plots" else: report_class = ScatterPlotReport scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") step_name = "make-absolute-scatter-plots" if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "{}-{}".format(rev1, config_nick) algo2 = "{}-{}".format(rev2, config_nick) report = report_class( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report( self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config in self._configs: for rev1, rev2 in itertools.combinations(self._revisions, 2): for attribute in self.get_supported_attributes( config.nick, attributes): make_scatter_plot(config.nick, rev1, rev2, attribute) self.add_step(step_name, make_scatter_plots)
14,462
35.24812
79
py
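The following sketch reproduces the output paths generated by add_scatter_plot_step(relative=True) in the common_setup.py above, using the naming from make_scatter_plot(). The experiment name and eval directory are assumptions; the revision and config names come from this issue's scripts.

# Illustrative sketch only; exp_name and eval_dir are assumptions.
import os

eval_dir = "data/issue732-v6-eval"   # hypothetical eval directory
exp_name = "issue732-v6"             # hypothetical experiment name
scatter_dir = os.path.join(eval_dir, "scatter-relative")  # relative=True

rev1, rev2 = "issue732-base", "issue732-v6"
config_nick = "astar-inf-release32"
attribute = "total_time"

name = "-".join([exp_name, rev1, rev2, attribute, config_nick])
print(os.path.join(scatter_dir, rev1 + "-" + rev2, name))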
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/v6.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, finite_sum from downward.reports.compare import ComparativeReport import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = [ "issue732-{rev}".format(**locals()) for rev in ["base", "v1", "v2", "v3", "v4", "v5", "v6"]] BUILDS = ["release32"] SEARCHES = [ ("astar-inf", ["--search", "astar(const(infinity))"]), ] CONFIGS = [ IssueConfig( "{nick}-{build}".format(**locals()), config, build_options=[build], driver_options=["--build", build]) for nick, config in SEARCHES for build in BUILDS ] SUITE = set( common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) ENVIRONMENT = BaselSlurmEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') exp.add_command('sg-parser', ['{sg_parser}']) attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ Attribute("sg_construction_time", functions=[finite_sum], min_wins=True), Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), ] # Instead of comparing all revision pairs in separate reports, create a # single report comparing neighboring revisions. # exp.add_comparison_table_step(attributes=attributes) compared_configs = [] for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:]): for config in CONFIGS: config_nick = config.nick compared_configs.append( ("{rev1}-{config_nick}".format(**locals()), "{rev2}-{config_nick}".format(**locals()), "Diff ({config_nick})".format(**locals()))) exp.add_report( ComparativeReport(compared_configs, attributes=attributes), name="compare-all-tags") exp.run_steps()
2,255
30.333333
80
py
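A worked example of the neighboring-revision pairs built by the loop in v6.py above, for the revision list base..v6 and the single config nick "astar-inf-release32".

# Standalone reproduction of the compared_configs pairing in v6.py.
revisions = ["issue732-{rev}".format(rev=rev)
             for rev in ["base", "v1", "v2", "v3", "v4", "v5", "v6"]]
config_nick = "astar-inf-release32"

compared_configs = [
    ("{rev1}-{nick}".format(rev1=rev1, nick=config_nick),
     "{rev2}-{nick}".format(rev2=rev2, nick=config_nick),
     "Diff ({nick})".format(nick=config_nick))
    for rev1, rev2 in zip(revisions[:-1], revisions[1:])]

print(compared_configs[0])
# ('issue732-base-astar-inf-release32',
#  'issue732-v1-astar-inf-release32',
#  'Diff (astar-inf-release32)')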
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/relativescatter.py
# -*- coding: utf-8 -*- from collections import defaultdict from matplotlib import ticker from downward.reports.scatter import ScatterPlotReport from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot # TODO: handle outliers # TODO: this is mostly copied from ScatterMatplotlib (scatter.py) class RelativeScatterMatplotlib(Matplotlib): @classmethod def _plot(cls, report, axes, categories, styles): # Display grid axes.grid(b=True, linestyle='-', color='0.75') has_points = False # Generate the scatter plots for category, coords in sorted(categories.items()): X, Y = zip(*coords) axes.scatter(X, Y, s=42, label=category, **styles[category]) if X and Y: has_points = True if report.xscale == 'linear' or report.yscale == 'linear': plot_size = report.missing_val * 1.01 else: plot_size = report.missing_val * 1.25 # make 5 ticks above and below 1 yticks = [] tick_step = report.ylim_top**(1/5.0) for i in xrange(-5, 6): yticks.append(tick_step**i) axes.set_yticks(yticks) axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) for axis in [axes.xaxis, axes.yaxis]: MatplotlibPlot.change_axis_formatter( axis, report.missing_val if report.show_missing else None) return has_points class RelativeScatterPlotReport(ScatterPlotReport): """ Generate a scatter plot that shows a relative comparison of two algorithms with regard to the given attribute. The attribute value of algorithm 1 is shown on the x-axis and the relation to the value of algorithm 2 on the y-axis. """ def __init__(self, show_missing=True, get_category=None, **kwargs): ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) if self.output_format == 'tex': raise "not supported" else: self.writer = RelativeScatterMatplotlib def _fill_categories(self, runs): # We discard the *runs* parameter. # Map category names to value tuples categories = defaultdict(list) self.ylim_bottom = 2 self.ylim_top = 0.5 self.xlim_left = float("inf") for (domain, problem), runs in self.problem_runs.items(): if len(runs) != 2: continue run1, run2 = runs assert (run1['algorithm'] == self.algorithms[0] and run2['algorithm'] == self.algorithms[1]) val1 = run1.get(self.attribute) val2 = run2.get(self.attribute) if val1 is None or val2 is None: continue category = self.get_category(run1, run2) assert val1 > 0, (domain, problem, self.algorithms[0], val1) assert val2 > 0, (domain, problem, self.algorithms[1], val2) x = val1 y = val2 / float(val1) categories[category].append((x, y)) self.ylim_top = max(self.ylim_top, y) self.ylim_bottom = min(self.ylim_bottom, y) self.xlim_left = min(self.xlim_left, x) # center around 1 if self.ylim_bottom < 1: self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) if self.ylim_top > 1: self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) return categories def _set_scales(self, xscale, yscale): # ScatterPlot uses log-scaling on the x-axis by default. PlotReport._set_scales( self, xscale or self.attribute.scale or 'log', 'log')
3,875
35.566038
78
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/v7-debug.py
#! /usr/bin/env python # -*- coding: utf-8 -*- import os from lab.environments import LocalEnvironment, BaselSlurmEnvironment from lab.reports import Attribute, finite_sum import common_setup from common_setup import IssueConfig, IssueExperiment DIR = os.path.dirname(os.path.abspath(__file__)) BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] REVISIONS = ["issue732-base", "issue732-v7"] BUILDS = ["debug32", "release32"] CONFIGS = [ IssueConfig( "lama-first-{build}".format(**locals()), [], build_options=[build], driver_options=["--alias", "lama-first", "--build", build]) for build in BUILDS ] SUITE = set( common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) ENVIRONMENT = BaselSlurmEnvironment( priority=0, email="[email protected]") if common_setup.is_test_run(): SUITE = IssueExperiment.DEFAULT_TEST_SUITE ENVIRONMENT = LocalEnvironment(processes=1) exp = IssueExperiment( revisions=REVISIONS, configs=CONFIGS, environment=ENVIRONMENT, ) exp.add_suite(BENCHMARKS_DIR, SUITE) exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') exp.add_command('sg-parser', ['{sg_parser}']) attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ Attribute("sg_construction_time", functions=[finite_sum], min_wins=True), Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), ] exp.add_comparison_table_step(attributes=attributes) exp.run_steps()
1,487
28.176471
80
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue732/v7.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, finite_sum

from downward.reports.compare import ComparativeReport

import common_setup
from common_setup import IssueConfig, IssueExperiment

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = [
    "issue732-{rev}".format(**locals())
    for rev in ["base", "v1", "v2", "v3", "v4", "v5", "v6", "v7"]]
BUILDS = ["release32"]
SEARCHES = [
    ("astar-inf", ["--search", "astar(const(infinity))"]),
]
CONFIGS = [
    IssueConfig(
        "{nick}-{build}".format(**locals()),
        config,
        build_options=[build],
        driver_options=["--build", build])
    for nick, config in SEARCHES
    for build in BUILDS
]
SUITE = set(
    common_setup.DEFAULT_OPTIMAL_SUITE +
    common_setup.DEFAULT_SATISFICING_SUITE)
ENVIRONMENT = BaselSlurmEnvironment(
    priority=0, email="[email protected]")

if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py')
exp.add_command('sg-parser', ['{sg_parser}'])

attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [
    Attribute("sg_construction_time", functions=[finite_sum], min_wins=True),
    Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True),
]

# Instead of comparing all revision pairs in separate reports, create a
# single report comparing neighboring revisions.
# exp.add_comparison_table_step(attributes=attributes)
compared_configs = []
for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:]):
    for config in CONFIGS:
        config_nick = config.nick
        compared_configs.append(
            ("{rev1}-{config_nick}".format(**locals()),
             "{rev2}-{config_nick}".format(**locals()),
             "Diff ({config_nick})".format(**locals())))
exp.add_report(
    ComparativeReport(compared_configs, attributes=attributes),
    name="compare-all-tags")

exp.run_steps()
2,261
30.416667
80
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/issue583-v4-dfp.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm import common_setup def main(revisions=None): SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( revisions=revisions, configs=CONFIGS, suite=SUITE, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) initial_h_value = Attribute('initial_h_value', absolute=False, min_wins=False) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, initial_h_value, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step(attributes=attributes) exp() if __name__ == '__main__': main(revisions=['issue583-base-v2', 'issue583-v4'])
3,036
39.493333
243
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/main.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from downward import suites from lab.reports import Attribute, gm import common_setup def main(revisions=None): SUITE = suites.suite_optimal_with_ipc11() B_CONFIGS = { 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } G_CONFIGS = { 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], } F_CONFIGS = { 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], } CONFIGS = dict(B_CONFIGS) CONFIGS.update(G_CONFIGS) CONFIGS.update(F_CONFIGS) exp = common_setup.IssueExperiment( revisions=revisions, configs=CONFIGS, suite=SUITE, test_suite=['depot:pfile1'], processes=4, email='[email protected]', ) exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') exp.add_command('ms-parser', ['ms_parser']) # planner outcome attributes perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) # m&s attributes ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) 
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) extra_attributes = [ perfect_heuristic, proved_unsolvability, actual_search_time, ms_construction_time, ms_abstraction_constructed, ms_final_size, ms_out_of_memory, ms_out_of_time, search_out_of_memory, search_out_of_time, ] attributes = exp.DEFAULT_TABLE_ATTRIBUTES attributes.extend(extra_attributes) exp.add_comparison_table_step(attributes=attributes) exp()
4,428
57.276316
277
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/issue583-v5.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from main import main

main(revisions=["issue583-base-v3", "issue583-v5"])
123
16.714286
51
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/issue583-v1.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from main import main

main(revisions=["issue583-base", "issue583-v1"])
120
16.285714
48
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/issue583-v3.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from main import main

main(revisions=["issue583-base-v2", "issue583-v3"])
123
16.714286
51
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/issue583-v2.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from main import main

main(revisions=["issue583-v1", "issue583-v2"])
118
16
46
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/ms-parser.py
#! /usr/bin/env python

from lab.parser import Parser

parser = Parser()
parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[.+s\]', required=False, type=float)
parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)

def check_ms_constructed(content, props):
    ms_construction_time = props.get('ms_construction_time')
    abstraction_constructed = False
    if ms_construction_time is not None:
        abstraction_constructed = True
    props['ms_abstraction_constructed'] = abstraction_constructed

parser.add_function(check_ms_constructed)

def check_proved_unsolvability(content, props):
    proved_unsolvability = False
    if props['coverage'] == 0:
        for line in content.splitlines():
            if line == 'Completely explored state space -- no solution!':
                proved_unsolvability = True
                break
    props['proved_unsolvability'] = proved_unsolvability

parser.add_function(check_proved_unsolvability)

def check_planner_exit_reason(content, props):
    ms_abstraction_constructed = props.get('ms_abstraction_constructed')
    error = props.get('error')
    if error != 'none' and error != 'timeout' and error != 'out-of-memory':
        print 'error: %s' % error
        return

    # Check whether merge-and-shrink computation or search ran out of
    # time or memory.
    ms_out_of_time = False
    ms_out_of_memory = False
    search_out_of_time = False
    search_out_of_memory = False
    if ms_abstraction_constructed == False:
        if error == 'timeout':
            ms_out_of_time = True
        elif error == 'out-of-memory':
            ms_out_of_memory = True
    elif ms_abstraction_constructed == True:
        if error == 'timeout':
            search_out_of_time = True
        elif error == 'out-of-memory':
            search_out_of_memory = True
    props['ms_out_of_time'] = ms_out_of_time
    props['ms_out_of_memory'] = ms_out_of_memory
    props['search_out_of_time'] = search_out_of_time
    props['search_out_of_memory'] = search_out_of_memory

parser.add_function(check_planner_exit_reason)

def check_perfect_heuristic(content, props):
    plan_length = props.get('plan_length')
    expansions = props.get('expansions')
    if plan_length != None:
        perfect_heuristic = False
        if plan_length + 1 == expansions:
            perfect_heuristic = True
        props['perfect_heuristic'] = perfect_heuristic

parser.add_function(check_perfect_heuristic)

parser.parse()
2,646
35.763889
128
py
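
ms-parser.py above derives several run attributes instead of matching them directly from the log: perfect_heuristic is inferred from expansions == plan_length + 1, and timeouts or memouts are attributed either to the merge-and-shrink construction phase or to the search phase, depending on whether an abstraction was built. The following stand-alone sketch mirrors that logic on a hand-written properties dict; derive_outcome and sample_props are made-up names for illustration, while the real checks run inside lab's Parser as shown above.

def derive_outcome(props):
    # Expanding exactly plan_length + 1 states is taken as evidence that the
    # heuristic was perfect along the plan (mirrors check_perfect_heuristic).
    plan_length = props.get('plan_length')
    expansions = props.get('expansions')
    if plan_length is not None:
        props['perfect_heuristic'] = (plan_length + 1 == expansions)
    # Attribute a timeout to the M&S construction phase if no abstraction was
    # built, otherwise to the search phase (mirrors check_planner_exit_reason).
    constructed = props.get('ms_abstraction_constructed', False)
    error = props.get('error')
    props['ms_out_of_time'] = (not constructed and error == 'timeout')
    props['search_out_of_time'] = (constructed and error == 'timeout')
    return props

sample_props = {'plan_length': 12, 'expansions': 13,
                'ms_abstraction_constructed': True, 'error': 'none'}
print(derive_outcome(sample_props)['perfect_heuristic'])  # True
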
DAAISy
DAAISy-main/dependencies/FD/experiments/issue583/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import subprocess import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.steps import Step from downward.experiments.fast_downward_experiment import FastDownwardExperiment from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(FastDownwardExperiment): """Wrapper for FastDownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "error", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, revisions, suite, build_options=None, driver_options=None, grid_priority=None, test_suite=None, email=None, processes=1, **kwargs): """Create an FastDownwardExperiment with some convenience features. All configs will be run on all revisions. Inherited options *path*, *environment* and *cache_dir* from FastDownwardExperiment are not supported and will be automatically set. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. nick will automatically get the revision prepended, e.g. 'issue123-base-<nick>':: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *revisions* must be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. 
:: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): environment = LocalEnvironment(processes=processes) suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: environment = MaiaEnvironment(priority=grid_priority, email=email) FastDownwardExperiment.__init__(self, environment=environment, **kwargs) # Automatically deduce the downward repository from the file repo = get_repo_base() self.algorithm_nicks = [] self.revisions = revisions for nick, cmdline in configs.items(): for rev in revisions: algo_nick = '%s-%s' % (rev, nick) self.add_algorithm(algo_nick, repo, rev, cmdline, build_options, driver_options) self.algorithm_nicks.append(algo_nick) benchmarks_dir = os.path.join(repo, 'benchmarks') self.add_suite(benchmarks_dir, suite) self.search_parsers = [] def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) # oufile is of the form <rev1>-<rev2>-...-<revn>.<format> outfile = '' for rev in self.revisions: outfile += rev outfile += '-' outfile = outfile[:len(outfile)-1] outfile += '.' outfile += report.output_format outfile = os.path.join(self.eval_dir, outfile) self.add_report(report, outfile=outfile) self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. 
:: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revisions, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def publish_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revisions, 2): outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) subprocess.call(['publish', outfile]) self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) # TODO: this is copied from the old common_setup, but not tested # with the new FastDownwardExperiment class! def add_scatter_plot_step(self, attributes=None): print 'This has not been tested with the new FastDownwardExperiment class!' exit(0) """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. :: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plot(config_nick, rev1, rev2, attribute): name = "-".join([self.name, rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, rev1 + "-" + rev2, name)) def make_scatter_plots(): for config_nick in self._config_nicks: if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): for attribute in valid_attributes: make_scatter_plot(config_nick, rev1, rev2, attribute) self.add_step(Step("make-scatter-plots", make_scatter_plots))
11,089
35.843854
93
py
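
The IssueExperiment wrapper defined in this common_setup.py is what main.py and the issue583-v*.py stubs earlier in this dump instantiate. A minimal sketch of that usage follows; the revision names, configuration, and e-mail address are placeholders for illustration, not values taken from the repository.

from downward import suites

from common_setup import IssueExperiment

exp = IssueExperiment(
    revisions=['issue583-base', 'issue583-v1'],         # placeholder revisions
    configs={'blind': ['--search', 'astar(blind())']},  # placeholder config
    suite=suites.suite_optimal_with_ipc11(),
    test_suite=['depot:pfile1'],
    email='[email protected]',
)
exp.add_comparison_table_step()
exp()
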
DAAISy
DAAISy-main/dependencies/FD/experiments/issue547/issue547-v2-sat.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites
from downward.reports.scatter import ScatterPlotReport

import common_setup
from relativescatter import RelativeScatterPlotReport


SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_satisficing_with_ipc11()

CONFIGS = {
    'astar_blind': [
        '--search',
        'astar(blind())'],
    'lazy_greedy_cg': [
        '--heuristic',
        'h=cg()',
        '--search',
        'lazy_greedy(h, preferred=h)'],
    'lazy_greedy_cg_randomized': [
        '--heuristic',
        'h=cg()',
        '--search',
        'lazy_greedy(h, preferred=h, randomize_successors=true)'],
    'eager_greedy_ff': [
        '--heuristic',
        'h=ff()',
        '--search',
        'eager_greedy(h, preferred=h)'],
}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
    )

exp.add_search_parser("custom-parser.py")

attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)

for conf in CONFIGS:
    for attr in ("memory", "search_time"):
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attr],
                get_category=lambda run1, run2: run1.get("domain"),
                filter_config=["issue547-base-%s" % conf,
                               "issue547-v2-%s" % conf]
            ),
            outfile='issue547_base_v2-sat_%s_%s.png' % (conf, attr)
        )

exp()
1,529
25.842105
111
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue547/common_setup.py
# -*- coding: utf-8 -*- import itertools import os import platform import sys from lab.environments import LocalEnvironment, MaiaEnvironment from lab.experiment import ARGPARSER from lab.reports import Table from lab.steps import Step from downward.experiments import DownwardExperiment, _get_rev_nick from downward.checkouts import Translator, Preprocessor, Planner from downward.reports import PlanningReport from downward.reports.absolute import AbsoluteReport from downward.reports.compare import CompareRevisionsReport from downward.reports.scatter import ScatterPlotReport def parse_args(): ARGPARSER.add_argument( "--test", choices=["yes", "no", "auto"], default="auto", dest="test_run", help="test experiment locally on a small suite if --test=yes or " "--test=auto and we are not on a cluster") return ARGPARSER.parse_args() ARGS = parse_args() def get_script(): """Get file name of main script.""" import __main__ return __main__.__file__ def get_script_dir(): """Get directory of main script. Usually a relative directory (depends on how it was called by the user.)""" return os.path.dirname(get_script()) def get_experiment_name(): """Get name for experiment. Derived from the absolute filename of the main script, e.g. "/ham/spam/eggs.py" => "spam-eggs".""" script = os.path.abspath(get_script()) script_dir = os.path.basename(os.path.dirname(script)) script_base = os.path.splitext(os.path.basename(script))[0] return "%s-%s" % (script_dir, script_base) def get_data_dir(): """Get data dir for the experiment. This is the subdirectory "data" of the directory containing the main script.""" return os.path.join(get_script_dir(), "data", get_experiment_name()) def get_repo_base(): """Get base directory of the repository, as an absolute path. Search upwards in the directory tree from the main script until a directory with a subdirectory named ".hg" is found. Abort if the repo base cannot be found.""" path = os.path.abspath(get_script_dir()) while os.path.dirname(path) != path: if os.path.exists(os.path.join(path, ".hg")): return path path = os.path.dirname(path) sys.exit("repo base could not be found") def is_running_on_cluster(): node = platform.node() return ("cluster" in node or node.startswith("gkigrid") or node in ["habakuk", "turtur"]) def is_test_run(): return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and not is_running_on_cluster()) class IssueExperiment(DownwardExperiment): """Wrapper for DownwardExperiment with a few convenience features.""" DEFAULT_TEST_SUITE = "gripper:prob01.pddl" # TODO: Add something about errors/exit codes. DEFAULT_TABLE_ATTRIBUTES = [ "cost", "coverage", "evaluations", "expansions", "expansions_until_last_jump", "generated", "memory", "quality", "run_dir", "score_evaluations", "score_expansions", "score_generated", "score_memory", "score_search_time", "score_total_time", "search_time", "total_time", ] DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ "evaluations", "expansions", "expansions_until_last_jump", "initial_h_value", "memory", "search_time", "total_time", ] PORTFOLIO_ATTRIBUTES = [ "cost", "coverage", "plan_length", ] def __init__(self, configs, suite, grid_priority=None, path=None, repo=None, revisions=None, search_revisions=None, test_suite=None, **kwargs): """Create a DownwardExperiment with some convenience features. *configs* must be a non-empty dict of {nick: cmdline} pairs that sets the planner configurations to test. 
:: IssueExperiment(configs={ "lmcut": ["--search", "astar(lmcut())"], "ipdb": ["--search", "astar(ipdb())"]}) *suite* sets the benchmarks for the experiment. It must be a single string or a list of strings specifying domains or tasks. The downward.suites module has many predefined suites. :: IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) from downward import suites IssueExperiment(suite=suites.suite_all()) IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) IssueExperiment(suite=suites.suite_optimal()) Use *grid_priority* to set the job priority for cluster experiments. It must be in the range [-1023, 0] where 0 is the highest priority. By default the priority is 0. :: IssueExperiment(grid_priority=-500) If *path* is specified, it must be the path to where the experiment should be built (e.g. /home/john/experiments/issue123/exp01/). If omitted, the experiment path is derived automatically from the main script's filename. Example:: script = experiments/issue123/exp01.py --> path = experiments/issue123/data/issue123-exp01/ If *repo* is specified, it must be the path to the root of a local Fast Downward repository. If omitted, the repository is derived automatically from the main script's path. Example:: script = /path/to/fd-repo/experiments/issue123/exp01.py --> repo = /path/to/fd-repo If *revisions* is specified, it should be a non-empty list of revisions, which specify which planner versions to use in the experiment. The same versions are used for translator, preprocessor and search. :: IssueExperiment(revisions=["issue123", "4b3d581643"]) If *search_revisions* is specified, it should be a non-empty list of revisions, which specify which search component versions to use in the experiment. All runs use the translator and preprocessor component of the first revision. :: IssueExperiment(search_revisions=["default", "issue123"]) If you really need to specify the (translator, preprocessor, planner) triples manually, use the *combinations* parameter from the base class (might be deprecated soon). The options *revisions*, *search_revisions* and *combinations* can be freely mixed, but at least one of them must be given. Specify *test_suite* to set the benchmarks for experiment test runs. By default the first gripper task is used. IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) """ if is_test_run(): kwargs["environment"] = LocalEnvironment() suite = test_suite or self.DEFAULT_TEST_SUITE elif "environment" not in kwargs: kwargs["environment"] = MaiaEnvironment(priority=grid_priority) if path is None: path = get_data_dir() if repo is None: repo = get_repo_base() kwargs.setdefault("combinations", []) if not any([revisions, search_revisions, kwargs["combinations"]]): raise ValueError('At least one of "revisions", "search_revisions" ' 'or "combinations" must be given') if revisions: kwargs["combinations"].extend([ (Translator(repo, rev), Preprocessor(repo, rev), Planner(repo, rev)) for rev in revisions]) if search_revisions: base_rev = search_revisions[0] # Use the same nick for all parts to get short revision nick. 
kwargs["combinations"].extend([ (Translator(repo, base_rev, nick=rev), Preprocessor(repo, base_rev, nick=rev), Planner(repo, rev, nick=rev)) for rev in search_revisions]) DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) self._config_nicks = [] for nick, config in configs.items(): self.add_config(nick, config) self.add_suite(suite) @property def revision_nicks(self): # TODO: Once the add_algorithm() API is available we should get # rid of the call to _get_rev_nick() and avoid inspecting the # list of combinations by setting and saving the algorithm nicks. return [_get_rev_nick(*combo) for combo in self.combinations] def add_config(self, nick, config, timeout=None): DownwardExperiment.add_config(self, nick, config, timeout=timeout) self._config_nicks.append(nick) def add_absolute_report_step(self, **kwargs): """Add step that makes an absolute report. Absolute reports are useful for experiments that don't compare revisions. The report is written to the experiment evaluation directory. All *kwargs* will be passed to the AbsoluteReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_absolute_report_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) report = AbsoluteReport(**kwargs) outfile = get_experiment_name() + "." + report.output_format self.add_report(report, outfile=outfile) def add_comparison_table_step(self, **kwargs): """Add a step that makes pairwise revision comparisons. Create comparative reports for all pairs of Fast Downward revision triples. Each report pairs up the runs of the same config and lists the two absolute attribute values and their difference for all attributes in kwargs["attributes"]. All *kwargs* will be passed to the CompareRevisionsReport class. If the keyword argument *attributes* is not specified, a default list of attributes is used. :: exp.add_comparison_table_step(attributes=["coverage"]) """ kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) def make_comparison_tables(): for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): report = CompareRevisionsReport(rev1, rev2, **kwargs) outfile = os.path.join(self.eval_dir, "%s-%s-compare.html" % (rev1, rev2)) report(self.eval_dir, outfile) self.add_step(Step("make-comparison-tables", make_comparison_tables)) def add_scatter_plot_step(self, attributes=None): """Add a step that creates scatter plots for all revision pairs. Create a scatter plot for each combination of attribute, configuration and revision pair. If *attributes* is not specified, a list of common scatter plot attributes is used. For portfolios all attributes except "cost", "coverage" and "plan_length" will be ignored. 
:: exp.add_scatter_plot_step(attributes=["expansions"]) """ if attributes is None: attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES scatter_dir = os.path.join(self.eval_dir, "scatter") def is_portfolio(config_nick): return "fdss" in config_nick def make_scatter_plots(): for config_nick in self._config_nicks: for rev1, rev2 in itertools.combinations( self.revision_nicks, 2): algo1 = "%s-%s" % (rev1, config_nick) algo2 = "%s-%s" % (rev2, config_nick) if is_portfolio(config_nick): valid_attributes = [ attr for attr in attributes if attr in self.PORTFOLIO_ATTRIBUTES] else: valid_attributes = attributes for attribute in valid_attributes: name = "-".join([rev1, rev2, attribute, config_nick]) print "Make scatter plot for", name report = ScatterPlotReport( filter_config=[algo1, algo2], attributes=[attribute], get_category=lambda run1, run2: run1["domain"], legend_location=(1.3, 0.5)) report(self.eval_dir, os.path.join(scatter_dir, name)) self.add_step(Step("make-scatter-plots", make_scatter_plots)) class RegressionReport(PlanningReport): """ Compare revisions for tasks on which the first revision performs better than other revisions. *revision_nicks* must be a list of revision_nicks, e.g. ["default", "issue123"]. *config_nicks* must be a list of configuration nicknames, e.g. ["eager_greedy_ff", "eager_greedy_add"]. *regression_attribute* is the attribute that we compare between different revisions. It defaults to "coverage". Example comparing search_time for tasks were we lose coverage:: exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], config_nicks=["eager_greedy_ff"], regression_attribute="coverage", attributes="search_time")) """ def __init__(self, revision_nicks, config_nicks, regression_attribute="coverage", **kwargs): PlanningReport.__init__(self, **kwargs) assert revision_nicks self.revision_nicks = revision_nicks assert config_nicks self.config_nicks = config_nicks self.regression_attribute = regression_attribute def get_markup(self): tables = [] for (domain, problem) in self.problems: for config_nick in self.config_nicks: runs = [self.runs[(domain, problem, rev + "-" + config_nick)] for rev in self.revision_nicks] if any(runs[0][self.regression_attribute] > runs[i][self.regression_attribute] for i in range(1, len(self.revision_nicks))): print "\"%s:%s\"," % (domain, problem) table = Table() for rev, run in zip(self.revision_nicks, runs): for attr in self.attributes: table.add_cell(rev, attr, run.get(attr)) table_name = ":".join((domain, problem, config_nick)) tables.append((table_name, table)) return "\n".join(name + "\n" + str(table) for name, table in tables)
14,920
36.3025
79
py
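
As the RegressionReport docstring in the file above explains, the report singles out tasks on which the first revision beats the later ones on some attribute. A sketch of wiring it into an issue547-style experiment follows; the revision and configuration names are placeholders and the attribute list is only an example, so treat this as an illustration rather than a script from the repository.

from common_setup import IssueExperiment, RegressionReport

exp = IssueExperiment(
    search_revisions=['issue547-base', 'issue547-v2'],        # placeholders
    configs={'astar_blind': ['--search', 'astar(blind())']},
    suite=['gripper:prob01.pddl'],
)
exp.add_comparison_table_step()
exp.add_report(
    RegressionReport(
        revision_nicks=['issue547-base', 'issue547-v2'],
        config_nicks=['astar_blind'],
        regression_attribute='coverage',
        attributes=['coverage', 'search_time']))
exp()
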
DAAISy
DAAISy-main/dependencies/FD/experiments/issue547/relativescatter.py
# -*- coding: utf-8 -*-
#
# downward uses the lab package to conduct experiments with the
# Fast Downward planning system.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from collections import defaultdict
import os

from lab import tools

from matplotlib import ticker

from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot


# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
    @classmethod
    def _plot(cls, report, axes, categories, styles):
        # Display grid
        axes.grid(b=True, linestyle='-', color='0.75')

        has_points = False
        # Generate the scatter plots
        for category, coords in sorted(categories.items()):
            X, Y = zip(*coords)
            axes.scatter(X, Y, s=42, label=category, **styles[category])
            if X and Y:
                has_points = True

        if report.xscale == 'linear' or report.yscale == 'linear':
            plot_size = report.missing_val * 1.01
        else:
            plot_size = report.missing_val * 1.25

        # make 5 ticks above and below 1
        yticks = []
        tick_step = report.ylim_top**(1/5.0)
        for i in xrange(-5, 6):
            yticks.append(tick_step**i)
        axes.set_yticks(yticks)
        axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())

        axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
        axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)

        for axis in [axes.xaxis, axes.yaxis]:
            MatplotlibPlot.change_axis_formatter(
                axis, report.missing_val if report.show_missing else None)
        return has_points


class RelativeScatterPlotReport(ScatterPlotReport):
    """
    Generate a scatter plot that shows how a specific attribute differs
    between two configurations. The attribute value in config 1 is shown
    on the x-axis and the relation to the value in config 2 on the y-axis.
    """

    def __init__(self, show_missing=True, get_category=None, **kwargs):
        ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            raise ValueError("output format 'tex' is not supported")
        else:
            self.writer = RelativeScatterMatplotlib

    def _fill_categories(self, runs):
        # We discard the *runs* parameter.
        # Map category names to value tuples
        categories = defaultdict(list)
        self.ylim_bottom = 2
        self.ylim_top = 0.5
        self.xlim_left = float("inf")
        for (domain, problem), runs in self.problem_runs.items():
            if len(runs) != 2:
                continue
            run1, run2 = runs
            assert (run1['config'] == self.configs[0] and
                    run2['config'] == self.configs[1])
            val1 = run1.get(self.attribute)
            val2 = run2.get(self.attribute)
            if val1 is None or val2 is None:
                continue
            category = self.get_category(run1, run2)
            assert val1 > 0, (domain, problem, self.configs[0], val1)
            assert val2 > 0, (domain, problem, self.configs[1], val2)
            x = val1
            y = val2 / float(val1)

            categories[category].append((x, y))

            self.ylim_top = max(self.ylim_top, y)
            self.ylim_bottom = min(self.ylim_bottom, y)
            self.xlim_left = min(self.xlim_left, x)

        # center around 1
        if self.ylim_bottom < 1:
            self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
        if self.ylim_top > 1:
            self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
        return categories

    def _set_scales(self, xscale, yscale):
        # ScatterPlots use log-scaling on the x-axis by default.
        default_xscale = 'log'
        if self.attribute and self.attribute in self.LINEAR:
            default_xscale = 'linear'
        PlotReport._set_scales(self, xscale or default_xscale, 'log')
4,690
35.937008
84
py
DAAISy
DAAISy-main/dependencies/FD/experiments/issue547/issue547-v2-lama.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from downward import suites
from downward.reports.scatter import ScatterPlotReport

import common_setup
from relativescatter import RelativeScatterPlotReport


SEARCH_REVS = ["issue547-base", "issue547-v2"]
SUITE = suites.suite_satisficing_with_ipc11()

CONFIGS = {
    'lama-2011-first': [
        "--if-unit-cost",
        "--heuristic",
        "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))",
        "--search",
        "lazy_greedy([hff,hlm],preferred=[hff,hlm])",
        "--if-non-unit-cost",
        "--heuristic",
        "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=one,cost_type=one))",
        "--heuristic",
        "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,"
        " lm_cost_type=plusone,cost_type=plusone))",
        "--search",
        "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], cost_type=one,reopen_closed=false)",
    ],
}

exp = common_setup.IssueExperiment(
    revisions=SEARCH_REVS,
    configs=CONFIGS,
    suite=SUITE,
    )

exp.add_search_parser("custom-parser.py")

attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"]
exp.add_comparison_table_step(attributes=attributes)

for conf in CONFIGS:
    for attr in ("memory", "search_time"):
        exp.add_report(
            RelativeScatterPlotReport(
                attributes=[attr],
                get_category=lambda run1, run2: run1.get("domain"),
                filter_config=["issue547-base-%s" % conf,
                               "issue547-v2-%s" % conf]
            ),
            outfile='issue547_base_v2-sat_%s_%s.png' % (conf, attr)
        )

exp()
1,723
30.345455
111
py