repo (stringlengths 2–99) | file (stringlengths 13–225) | code (stringlengths 0–18.3M) | file_length (int64 0–18.3M) | avg_line_length (float64 0–1.36M) | max_line_length (int64 0–4.26M) | extension_type (stringclasses 1)
---|---|---|---|---|---|---
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue621/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
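# Size the axes so the "missing value" markers fit: pad slightly on
# linear scales and more generously on log scales.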
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
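# tick_step is the fifth root of ylim_top, so tick_step**5 lands on
# ylim_top and tick_step**-5 on its reciprocal.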
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows how a specific attribute in two
configurations. The attribute value in config 1 is shown on the
x-axis and the relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
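# A minimal usage sketch (hypothetical experiment object; mirrors the
# experiment scripts that import this module):
#
#     exp.add_report(
#         RelativeScatterPlotReport(
#             attributes=["total_time"],
#             get_category=lambda run1, run2: run1.get("domain")),
#         outfile="relative-total_time.png")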
| 3,954 | 34.954545 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue724/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue724-base", "issue724-v1"]
CONFIGS = [
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']),
IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS", "PYTHONPATH"])
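# With --test=yes (or --test=auto when not on the cluster), fall back to a
# small local test suite instead of submitting to Slurm.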
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
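# One relative scatter plot per (attribute, config) pair, comparing the two
# revisions and grouping points by domain.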
for attribute in ["total_time"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
)
exp.run_steps()
| 1,587 | 31.408163 | 124 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue724/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue724-base", "issue724-v2"]
CONFIGS = [
IssueConfig('lama-first', [], driver_options=["--alias", "lama-first"]),
IssueConfig("ehc-ff", ["--search", "ehc(ff())"]),
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS", "PYTHONPATH"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attribute in ["total_time"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
)
exp.run_steps()
| 1,507 | 30.416667 | 124 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue724/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pipesworld-notankage',
'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
'tetris-opt14-strips', 'tidybot-opt11-strips',
'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
'transport-opt11-strips', 'transport-opt14-strips',
'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how the user invoked the script)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
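# Portfolio configurations are conventionally named after Fast Downward
# Stone Soup ("fdss").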
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,171 | 35.715026 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue724/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue540/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue540-base", "issue540-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"ipdb": ["--search", "astar(ipdb(max_time=150))"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 491 | 17.923077 | 54 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue540/v1-sat.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue540-base", "issue540-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"lama-first": [
"--if-unit-cost",
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))",
"--search",
"lazy_greedy([hff,hlm],preferred=[hff,hlm])",
"--if-non-unit-cost",
"--heuristic",
"hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))",
"--search",
"lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],cost_type=one,reopen_closed=false)",
"--always"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 940 | 24.432432 | 97 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue540/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how the user invoked the script)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-%s-compare.html" %
(self.name, rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in valid_attributes:
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,755 | 35.135977 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue862/translator_additional_parser.py
|
#!/usr/bin/env python
import hashlib
from lab.parser import Parser
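# Record a SHA-512 hash of the translator's output.sas so that runs can be
# checked for byte-identical translator output.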
def add_hash_value(content, props):
props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest()
parser = Parser()
parser.add_function(add_hash_value, file="output.sas")
parser.parse()
| 273 | 20.076923 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue862/v1.py
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab import tools
from downward.reports.compare import ComparativeReport
from downward.reports import PlanningReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue862-base", "issue862-v1", "issue862-v2", "issue862-v3"]
CONFIGS = [
IssueConfig(
"translate-only",
[],
driver_options=["--translate"])
]
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
# This was generated by running "./suites.py all" in the benchmarks
# repository. I don't know if there is a better way of doing this.
SUITE = [
'agricola-opt18-strips',
'agricola-sat18-strips',
'airport',
'airport-adl',
'assembly',
'barman-mco14-strips',
'barman-opt11-strips',
'barman-opt14-strips',
'barman-sat11-strips',
'barman-sat14-strips',
'blocks',
'caldera-opt18-adl',
'caldera-sat18-adl',
'caldera-split-opt18-adl',
'caldera-split-sat18-adl',
'cavediving-14-adl',
'childsnack-opt14-strips',
'childsnack-sat14-strips',
'citycar-opt14-adl',
'citycar-sat14-adl',
'data-network-opt18-strips',
'data-network-sat18-strips',
'depot',
'driverlog',
'elevators-opt08-strips',
'elevators-opt11-strips',
'elevators-sat08-strips',
'elevators-sat11-strips',
'flashfill-sat18-adl',
'floortile-opt11-strips',
'floortile-opt14-strips',
'floortile-sat11-strips',
'floortile-sat14-strips',
'freecell',
'ged-opt14-strips',
'ged-sat14-strips',
'grid',
'gripper',
'hiking-agl14-strips',
'hiking-opt14-strips',
'hiking-sat14-strips',
'logistics00',
'logistics98',
'maintenance-opt14-adl',
'maintenance-sat14-adl',
'miconic',
'miconic-fulladl',
'miconic-simpleadl',
'movie',
'mprime',
'mystery',
'no-mprime',
'no-mystery',
'nomystery-opt11-strips',
'nomystery-sat11-strips',
'nurikabe-opt18-adl',
'nurikabe-sat18-adl',
'openstacks',
'openstacks-agl14-strips',
'openstacks-opt08-adl',
'openstacks-opt08-strips',
'openstacks-opt11-strips',
'openstacks-opt14-strips',
'openstacks-sat08-adl',
'openstacks-sat08-strips',
'openstacks-sat11-strips',
'openstacks-sat14-strips',
'openstacks-strips',
'optical-telegraphs',
'organic-synthesis-opt18-strips',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-opt18-strips',
'organic-synthesis-split-sat18-strips',
'parcprinter-08-strips',
'parcprinter-opt11-strips',
'parcprinter-sat11-strips',
'parking-opt11-strips',
'parking-opt14-strips',
'parking-sat11-strips',
'parking-sat14-strips',
'pathways',
'pathways-noneg',
'pegsol-08-strips',
'pegsol-opt11-strips',
'pegsol-sat11-strips',
'petri-net-alignment-opt18-strips',
'philosophers',
'pipesworld-notankage',
'pipesworld-tankage',
'psr-large',
'psr-middle',
'psr-small',
'rovers',
'satellite',
'scanalyzer-08-strips',
'scanalyzer-opt11-strips',
'scanalyzer-sat11-strips',
'schedule',
'settlers-opt18-adl',
'settlers-sat18-adl',
'snake-opt18-strips',
'snake-sat18-strips',
'sokoban-opt08-strips',
'sokoban-opt11-strips',
'sokoban-sat08-strips',
'sokoban-sat11-strips',
'spider-opt18-strips',
'spider-sat18-strips',
'storage',
'termes-opt18-strips',
'termes-sat18-strips',
'tetris-opt14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'thoughtful-sat14-strips',
'tidybot-opt11-strips',
'tidybot-opt14-strips',
'tidybot-sat11-strips',
'tpp',
'transport-opt08-strips',
'transport-opt11-strips',
'transport-opt14-strips',
'transport-sat08-strips',
'transport-sat11-strips',
'transport-sat14-strips',
'trucks',
'trucks-strips',
'visitall-opt11-strips',
'visitall-opt14-strips',
'visitall-sat11-strips',
'visitall-sat14-strips',
'woodworking-opt08-strips',
'woodworking-opt11-strips',
'woodworking-sat08-strips',
'woodworking-sat11-strips',
'zenotravel',
]
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource("translator_additional_parser",
"translator_additional_parser.py",
dest="translator_additional_parser.py")
del exp.commands['remove-output-sas']
exp.add_command("translator_additional_parser",
["{translator_additional_parser}"])
class TranslatorDiffReport(PlanningReport):
def get_cell(self, run):
return ";".join(run.get(attr) for attr in self.attributes)
def get_text(self):
lines = []
for runs in self.problem_runs.values():
hashes = set([r.get("translator_output_sas_hash") for r in runs])
if len(hashes) > 1 or None in hashes:
lines.append(";".join([self.get_cell(r) for r in runs]))
return "\n".join(lines)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_report(TranslatorDiffReport(
attributes=["domain", "problem", "algorithm", "run_dir"]
), outfile="different_output_sas.csv"
)
exp.run_steps()
| 5,753 | 26.141509 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue862/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how the user invoked the script)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, outfile=None, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = outfile or os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-' + os.path.basename(outfile), subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,824 | 36.531646 | 90 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue862/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue862/v5-planner.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue862-base", "issue862-v5"]
BUILDS = ["release32"]
CONFIG_DICT = {
"lazy-greedy-{h}".format(**locals()): [
"--evaluator",
"h={h}()".format(**locals()),
"--search",
"lazy_greedy([h], preferred=[h])"]
for h in ["hmax", "add", "ff", "cg", "cea"]
}
CONFIG_DICT["lama-first"] = [
"--evaluator",
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)",
"--evaluator", "hff=ff(transform=adapt_costs(one))",
"--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm],
cost_type=one,reopen_closed=false)"""]
CONFIG_DICT["blind"] = ["--search", "astar(blind())"]
CONFIGS = [
IssueConfig(
"-".join([config_nick, build]),
config,
build_options=[build],
driver_options=["--build", build, "--overall-time-limit", "30m"])
for build in BUILDS
for config_nick, config in CONFIG_DICT.items()
]
SUITE = [
"airport-adl",
"assembly",
"miconic-fulladl",
"psr-large",
"psr-middle",
]
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step("build", exp.build)
exp.add_step("start", exp.start_runs)
exp.add_fetcher(name="fetch")
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.run_steps()
| 2,260 | 27.620253 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue862/v5.py
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
from collections import defaultdict
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab import tools
from downward.reports.compare import ComparativeReport
from downward.reports import PlanningReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue862-base", "issue862-v1", "issue862-v4", "issue862-v5"]
CONFIGS = [
IssueConfig(
"translate-only",
[],
driver_options=["--translate"])
]
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
# This was generated by running "./suites.py all" in the benchmarks
# repository.
SUITE = [
'agricola-opt18-strips',
'agricola-sat18-strips',
'airport',
'airport-adl',
'assembly',
'barman-mco14-strips',
'barman-opt11-strips',
'barman-opt14-strips',
'barman-sat11-strips',
'barman-sat14-strips',
'blocks',
'caldera-opt18-adl',
'caldera-sat18-adl',
'caldera-split-opt18-adl',
'caldera-split-sat18-adl',
'cavediving-14-adl',
'childsnack-opt14-strips',
'childsnack-sat14-strips',
'citycar-opt14-adl',
'citycar-sat14-adl',
'data-network-opt18-strips',
'data-network-sat18-strips',
'depot',
'driverlog',
'elevators-opt08-strips',
'elevators-opt11-strips',
'elevators-sat08-strips',
'elevators-sat11-strips',
'flashfill-sat18-adl',
'floortile-opt11-strips',
'floortile-opt14-strips',
'floortile-sat11-strips',
'floortile-sat14-strips',
'freecell',
'ged-opt14-strips',
'ged-sat14-strips',
'grid',
'gripper',
'hiking-agl14-strips',
'hiking-opt14-strips',
'hiking-sat14-strips',
'logistics00',
'logistics98',
'maintenance-opt14-adl',
'maintenance-sat14-adl',
'miconic',
'miconic-fulladl',
'miconic-simpleadl',
'movie',
'mprime',
'mystery',
'no-mprime',
'no-mystery',
'nomystery-opt11-strips',
'nomystery-sat11-strips',
'nurikabe-opt18-adl',
'nurikabe-sat18-adl',
'openstacks',
'openstacks-agl14-strips',
'openstacks-opt08-adl',
'openstacks-opt08-strips',
'openstacks-opt11-strips',
'openstacks-opt14-strips',
'openstacks-sat08-adl',
'openstacks-sat08-strips',
'openstacks-sat11-strips',
'openstacks-sat14-strips',
'openstacks-strips',
'optical-telegraphs',
'organic-synthesis-opt18-strips',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-opt18-strips',
'organic-synthesis-split-sat18-strips',
'parcprinter-08-strips',
'parcprinter-opt11-strips',
'parcprinter-sat11-strips',
'parking-opt11-strips',
'parking-opt14-strips',
'parking-sat11-strips',
'parking-sat14-strips',
'pathways',
'pathways-noneg',
'pegsol-08-strips',
'pegsol-opt11-strips',
'pegsol-sat11-strips',
'petri-net-alignment-opt18-strips',
'philosophers',
'pipesworld-notankage',
'pipesworld-tankage',
'psr-large',
'psr-middle',
'psr-small',
'rovers',
'satellite',
'scanalyzer-08-strips',
'scanalyzer-opt11-strips',
'scanalyzer-sat11-strips',
'schedule',
'settlers-opt18-adl',
'settlers-sat18-adl',
'snake-opt18-strips',
'snake-sat18-strips',
'sokoban-opt08-strips',
'sokoban-opt11-strips',
'sokoban-sat08-strips',
'sokoban-sat11-strips',
'spider-opt18-strips',
'spider-sat18-strips',
'storage',
'termes-opt18-strips',
'termes-sat18-strips',
'tetris-opt14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'thoughtful-sat14-strips',
'tidybot-opt11-strips',
'tidybot-opt14-strips',
'tidybot-sat11-strips',
'tpp',
'transport-opt08-strips',
'transport-opt11-strips',
'transport-opt14-strips',
'transport-sat08-strips',
'transport-sat11-strips',
'transport-sat14-strips',
'trucks',
'trucks-strips',
'visitall-opt11-strips',
'visitall-opt14-strips',
'visitall-sat11-strips',
'visitall-sat14-strips',
'woodworking-opt08-strips',
'woodworking-opt11-strips',
'woodworking-sat08-strips',
'woodworking-sat11-strips',
'zenotravel',
]
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser("translator_additional_parser.py")
del exp.commands['remove-output-sas']
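# Emits one CSV line per task for which the translator output hashes differ
# between algorithms (or are missing for some run).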
class TranslatorDiffReport(PlanningReport):
def get_cell(self, run):
return ";".join(run.get(attr) for attr in self.attributes)
def get_text(self):
lines = []
for runs in self.problem_runs.values():
hashes = set([r.get("translator_output_sas_hash") for r in runs])
if len(hashes) > 1 or None in hashes:
lines.append(";".join([self.get_cell(r) for r in runs]))
return "\n".join(lines)
class SameValueFilters(object):
"""Ignore runs for a task where all algorithms have the same value."""
def __init__(self, attribute):
self._attribute = attribute
self._tasks_to_values = defaultdict(list)
def _get_task(self, run):
return (run['domain'], run['problem'])
def store_values(self, run):
value = run.get(self._attribute)
self._tasks_to_values[self._get_task(run)].append(value)
# Don't filter this run, yet.
return True
def filter_tasks_with_equal_values(self, run):
values = self._tasks_to_values[self._get_task(run)]
return len(set(values)) != 1
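# Note: lab applies the filter functions in sequence over all runs (the
# first filter finishes before the second one starts), so store_values has
# seen every run by the time filter_tasks_with_equal_values is called.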
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_parse_again_step()
exp.add_fetcher(name='fetch')
ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"]
exp.add_absolute_report_step(
outfile=os.path.join(exp.eval_dir, "{EXPNAME}.html".format(**locals())),
attributes=ATTRIBUTES)
same_value_filters = SameValueFilters("translator_output_sas_hash")
exp.add_absolute_report_step(
    outfile=os.path.join(exp.eval_dir, "{EXPNAME}-filtered.html".format(**locals())),
    attributes=ATTRIBUTES,
    filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values])
exp.add_report(TranslatorDiffReport(
attributes=["domain", "problem", "algorithm", "run_dir"]
), outfile="different_output_sas.csv"
)
exp.run_steps()
| 6,873 | 27.404959 | 94 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue439/issue439.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs, suites
import common_setup
SEARCH_REVS = ["issue439-base", "issue439-v1"]
LIMITS = {"search_time": 300}
SUITE = suites.suite_satisficing_with_ipc11()
configs_satisficing_core = configs.configs_satisficing_core()
CONFIGS = {}
for name in ["eager_greedy_add", "eager_greedy_ff",
"lazy_greedy_add", "lazy_greedy_ff"]:
CONFIGS[name] = configs_satisficing_core[name]
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_report(common_setup.RegressionReport(
revision_nicks=exp.revision_nicks,
config_nicks=CONFIGS.keys()))
exp()
| 779 | 21.941176 | 61 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue439/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.reports import Table
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports import PlanningReport
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
# TODO: Add something about errors/exit codes.
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
            # Use the same nick for all parts to get a short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" % (rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for attribute in valid_attributes:
name = "-".join([rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
class RegressionReport(PlanningReport):
"""
Compare revisions for tasks on which the first revision performs
better than other revisions.
*revision_nicks* must be a list of revision_nicks, e.g.
["default", "issue123"].
*config_nicks* must be a list of configuration nicknames, e.g.
["eager_greedy_ff", "eager_greedy_add"].
*regression_attribute* is the attribute that we compare between
different revisions. It defaults to "coverage".
    Example comparing search_time for tasks where we lose coverage::
exp.add_report(RegressionReport(revision_nicks=["default", "issue123"],
config_nicks=["eager_greedy_ff"],
regression_attribute="coverage",
attributes="search_time"))
"""
def __init__(self, revision_nicks, config_nicks,
regression_attribute="coverage", **kwargs):
PlanningReport.__init__(self, **kwargs)
assert revision_nicks
self.revision_nicks = revision_nicks
assert config_nicks
self.config_nicks = config_nicks
self.regression_attribute = regression_attribute
def get_markup(self):
tables = []
for (domain, problem) in self.problems:
for config_nick in self.config_nicks:
runs = [self.runs[(domain, problem, rev + "-" + config_nick)]
for rev in self.revision_nicks]
if any(runs[0][self.regression_attribute] >
runs[i][self.regression_attribute]
for i in range(1, len(self.revision_nicks))):
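                    # Print the task in "domain:problem" suite syntax so
                    # regressed tasks can be pasted into a follow-up
                    # experiment's SUITE (cf. regressions.py).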
print "\"%s:%s\"," % (domain, problem)
table = Table()
for rev, run in zip(self.revision_nicks, runs):
for attr in self.attributes:
table.add_cell(rev, attr, run.get(attr))
table_name = ":".join((domain, problem, config_nick))
tables.append((table_name, table))
return "\n".join(name + "\n" + str(table) for name, table in tables)
| 14,920 | 36.3025 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue439/issue439-30min.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs, suites
import common_setup
SEARCH_REVS = ["issue439-base", "issue439-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
configs_satisficing_core = configs.configs_satisficing_core()
CONFIGS = {}
for name in ["eager_greedy_add", "eager_greedy_ff",
"lazy_greedy_add", "lazy_greedy_ff"]:
CONFIGS[name] = configs_satisficing_core[name]
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_search_parser("custom-parser.py")
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"]
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
exp.add_report(common_setup.RegressionReport(
revision_nicks=exp.revision_nicks,
config_nicks=CONFIGS.keys(),
attributes=attributes))
exp()
| 960 | 24.972973 | 68 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue439/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
class CustomParser(Parser):
def __init__(self):
Parser.__init__(self)
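        # "init_time" is the timestamp of the first reported heuristic
        # value (g=0, one state evaluated), i.e. roughly the time needed
        # to initialize the search and evaluate the initial state.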
self.add_pattern(
"init_time",
"Best heuristic value: \d+ \[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]",
required=True,
type=float)
if __name__ == "__main__":
parser = CustomParser()
print "Running custom parser"
parser.parse()
| 441 | 21.1 | 90 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue439/regressions.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import configs
import common_setup
SEARCH_REVS = ["issue439-base", "issue439-v1"]
LIMITS = {"search_time": 1800}
SUITE = [
"airport:p45-airport5MUC-p6.pddl",
"elevators-sat08-strips:p22.pddl",
"parking-sat11-strips:pfile09-033.pddl",
"scanalyzer-08-strips:p30.pddl",
"transport-sat11-strips:p14.pddl",
"transport-sat11-strips:p16.pddl",
"trucks:p19.pddl",
"trucks-strips:p23.pddl",
]
configs_satisficing_core = configs.configs_satisficing_core()
CONFIGS = {}
for name in ["eager_greedy_add", "eager_greedy_ff",
"lazy_greedy_add", "lazy_greedy_ff"]:
CONFIGS[name] = configs_satisficing_core[name]
exp = common_setup.IssueExperiment(
revisions=SEARCH_REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_search_parser("custom-parser.py")
attributes = exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"]
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
exp.add_report(common_setup.RegressionReport(
revision_nicks=exp.revision_nicks,
config_nicks=CONFIGS.keys(),
attributes=attributes))
exp()
| 1,209 | 25.304348 | 68 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue392/lama-unit.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue392-v2"]
LIMITS = {"search_time": 300}
CONFIGS = {}
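# Build a 2x2 grid of configurations over successor randomization and
# preferred-successors-first ordering.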
for randomize in ["false", "true"]:
for pref_first in ["false", "true"]:
CONFIGS["lama-unit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()] = [
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))",
"--search",
"iterated(["
"lazy_greedy([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s),"
"lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5),"
"lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3),"
"lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2),"
"lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)],"
"repeat_last=true,continue_on_fail=true)" % locals()
]
SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) &
set(suites.suite_unit_costs()))
exp = common_setup.IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp()
| 1,532 | 35.5 | 139 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue392/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
# TODO: Add something about errors/exit codes.
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
            # Use the same nick for all parts to get a short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" % (rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for attribute in valid_attributes:
name = "-".join([rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,677 | 35.431034 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue392/lama-nonunit.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue392-v2"]
LIMITS = {"search_time": 300}
CONFIGS = {}
for randomize in ["false", "true"]:
for pref_first in ["false", "true"]:
CONFIGS["lama-nonunit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()] = [
"--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=ONE,cost_type=ONE))",
"--heuristic", "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))",
"--search",
"iterated(["
"lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,cost_type=ONE,reopen_closed=false),"
"lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,reopen_closed=false),"
"lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5),"
"lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3),"
"lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2),"
"lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)],"
"repeat_last=true,continue_on_fail=true)" % locals()
]
SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) &
set(suites.suite_diverse_costs()))
exp = common_setup.IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp()
| 1,902 | 43.255814 | 176 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue182/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common_setup
from common_setup import IssueConfig, IssueExperiment
import suites
heuristics = [
"{}(cache_estimates=false)".format(h) for h in (
"pdb", "cpdbs", "diverse_potentials", "all_states_potential",
"initial_state_potential", "sample_based_potentials")]
max_eval = "max([{}])".format(",".join(heuristics))
ipc_max = "ipc_max([{}],cache_estimates=false)".format(",".join(heuristics))
configs = [
IssueConfig(
name,
["--search", "astar({})".format(eval_)])
for name, eval_ in [("max", max_eval), ("ipc_max", ipc_max)]
]
revision = "1e84d77e4e37"
exp = IssueExperiment(
revisions=[revision],
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="[email protected]",
)
exp.add_absolute_report_step()
exp.add_report(
common_setup.CompareConfigsReport(
[(revision + "-" + "ipc_max", revision + "-" + "max")],
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name=common_setup.get_experiment_name() + "-compare")
exp()
| 1,117 | 26.268293 | 76 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue182/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from common_setup import IssueConfig, IssueExperiment
import suites
configs = [
IssueConfig(
func,
["--search", "astar({}([ipdb(max_time=5),diverse_potentials(),all_states_potential(),initial_state_potential(),sample_based_potentials()]))".format(func)])
for func in ["max", "ipc_max"]
]
revisions = ["8f1563b36fc7"]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="[email protected]",
)
exp.add_absolute_report_step()
exp()
| 613 | 21.740741 | 163 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue182/v1-no-cache.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common_setup
from common_setup import IssueConfig, IssueExperiment
import suites
heuristics = [
"{}(cache_estimates=false)".format(h) for h in (
"pdb", "cpdbs", "diverse_potentials", "all_states_potential",
"initial_state_potential", "sample_based_potentials")]
max_eval = "max([{}])".format(",".join(heuristics))
ipc_max = "ipc_max([{}],cache_estimates=false)".format(",".join(heuristics))
configs = [
IssueConfig(
name,
["--search", "astar({})".format(eval_)])
for name, eval_ in [("max", max_eval), ("ipc_max", ipc_max)]
]
revision = "8f1563b36fc7"
exp = IssueExperiment(
revisions=[revision],
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="[email protected]",
)
exp.add_absolute_report_step()
exp.add_report(
common_setup.CompareConfigsReport(
[(revision + "-" + "ipc_max", revision + "-" + "max")],
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name=common_setup.get_experiment_name() + "-compare")
exp()
| 1,117 | 26.268293 | 76 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue182/suites.py
|
# Benchmark suites from the Fast Downward benchmark collection.
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-agl14-adl',
'citycar-agl14-adl',
'maintenance-agl14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-agl14-strips',
'childsnack-agl14-strips',
'floortile-agl14-strips',
'ged-agl14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-agl14-strips',
'tetris-agl14-strips',
'thoughtful-agl14-strips',
'transport-agl14-strips',
'visitall-agl14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-mco14-adl',
'citycar-mco14-adl',
'maintenance-mco14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-mco14-strips',
'floortile-mco14-strips',
'ged-mco14-strips',
'hiking-mco14-strips',
'openstacks-mco14-strips',
'parking-mco14-strips',
'tetris-mco14-strips',
'thoughtful-mco14-strips',
'transport-mco14-strips',
'visitall-mco14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-opt14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-sat14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat())
def suite_unsolvable():
# TODO: Add other unsolvable problems (Miconic-FullADL).
# TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl'
# if the extra-domains branch is merged.
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_alternative_formulations())
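# Example (illustrative): the predefined suites can be combined with set
# operations, e.g. to restrict the satisficing STRIPS benchmarks to their
# IPC 2011 subset:
#     sorted(set(suite_satisficing_strips()) & set(suite_ipc11_sat()))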
| 7,695 | 23.35443 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue182/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
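# Example (illustrative):
#     IssueConfig("blind", ["--search", "astar(blind())"],
#                 driver_options=["--overall-time-limit", "30m"])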
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"initial_h_value",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
    def __init__(self, suite, revisions=None, configs=None, grid_priority=None,
path=None, test_suite=None, email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
        # Avoid mutable default arguments.
        revisions = revisions or []
        configs = configs or []
        if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(os.path.join(repo, "benchmarks"), suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
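        # "publish" is assumed to be a site-local helper script that
        # uploads the report and prints its URL.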
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,508 | 33.941341 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue344/exp14.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import downward.configs
import downward.suites
import common_setup
exp = common_setup.MyExperiment(
grid_priority=0,
search_revisions=["issue344-base", "issue344-v5"],
configs=downward.configs.default_configs_optimal(),
suite=downward.suites.suite_optimal_with_ipc11(),
do_test_run="auto"
)
exp.add_comparison_table_step()
exp.add_scatter_plot_step()
exp()
| 434 | 18.772727 | 55 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue344/common_setup.py
|
# -*- coding: utf-8 -*-
import os.path
import platform
from lab.environments import MaiaEnvironment
from lab.steps import Step
from downward.checkouts import Translator, Preprocessor, Planner
from downward.experiments import DownwardExperiment
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the filename of the main script, e.g.
"/ham/spam/eggs.py" => "eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Found by searching upwards in the directory tree from the main
script until a directory with a subdirectory named ".hg" is found."""
path = os.path.abspath(get_script_dir())
while True:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
def build_combos_with_names(repo, combinations, revisions, search_revisions):
"""Build (combos, combo_names) lists for the given planner revisions.
combos and combo_names are parallel lists, where combos contains
(Translator, Preprocessor, Search) triples and combo_names are the names
for the respective combinations that lab uses internally.
See MyExperiment.__init__ for documentation of the parameters
combinations, revisions and search_revisions."""
combos = []
names = []
def build(*rev_triple):
combo, name = build_combo_with_name(repo, *rev_triple)
combos.append(combo)
names.append(name)
for triple in combinations or []:
build(*triple)
for rev in revisions or []:
build(rev, rev, rev)
for rev in search_revisions or []:
build(search_revisions[0], search_revisions[0], rev)
return combos, names
def build_combo_with_name(repo, trans_rev, preprocess_rev, search_rev):
"""Generate a tuple (combination, name) for the given revisions.
combination is a (Translator, Preprocessor, Search) tuple
and name is the name that lab uses to refer to it."""
# TODO: In the future, it would be nice if we didn't need the name
# information any more, as it is somewhat of an implementation
# detail.
combo = (Translator(repo, trans_rev),
Preprocessor(repo, preprocess_rev),
Planner(repo, search_rev))
if trans_rev == preprocess_rev == search_rev:
name = str(search_rev)
else:
name = "%s-%s-%s" % (trans_rev, preprocess_rev, search_rev)
return combo, name
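# --- Illustrative sketch (not part of the original module) ---
# The naming rule above in isolation: identical revisions collapse to a
# single name, mixed revisions are dash-joined. Revision ids are made up.
def _demo_combo_names():
    for triple in [("v5", "v5", "v5"), ("base", "base", "v5")]:
        trans_rev, preprocess_rev, search_rev = triple
        if trans_rev == preprocess_rev == search_rev:
            print(str(search_rev))          # -> "v5"
        else:
            print("%s-%s-%s" % triple)      # -> "base-base-v5"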
def is_on_grid():
"""Returns True if the current machine is on the maia grid.
Implemented by checking if host name ends with ".cluster".
"""
return platform.node().endswith(".cluster")
class MyExperiment(DownwardExperiment):
DEFAULT_TEST_SUITE = [
"zenotravel:pfile1",
"zenotravel:pfile2",
]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"total_time",
"search_time",
"memory",
"expansions_until_last_jump",
]
"""Wrapper for DownwardExperiment with a few convenience features."""
def __init__(self, configs=None, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
combinations=None, suite=None, do_test_run="auto",
test_suite=DEFAULT_TEST_SUITE, **kwargs):
"""Create a DownwardExperiment with some convenience features.
If "configs" is specified, it should be a dict of {nick:
cmdline} pairs that sets the planner configurations to test.
If "grid_priority" is specified and no environment is
specifically requested in **kwargs, use the maia environment
with the specified priority.
If "path" is not specified, the experiment data path is
derived automatically from the main script's filename.
If "repo" is not specified, the repository base is derived
automatically from the main script's path.
If "combinations" is specified, it should be a non-empty list
of revision triples of the form (translator_rev,
preprocessor_rev, search_rev).
If "revisions" is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search.
If "search_revisions" is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All experiments use the
translator and preprocessor component of the first
revision.
It is possible to specify a mixture of "combinations",
"revisions" and "search_revisions".
If "suite" is specified, it should specify a problem suite.
If "do_test_run" is true, the "grid_priority" and
"environment" (from the base class) arguments are ignored and
a local experiment with default arguments is run instead. In
this case, the "suite" argument is replaced by the "test_suite"
argument.
If "do_test_run" is the string "auto" (the default), then
do_test_run is set to False when run on a grid machine and
to True otherwise. A grid machine is identified as one whose
node name ends with ".cluster".
"""
if do_test_run == "auto":
do_test_run = not is_on_grid()
if do_test_run:
# In a test run, overwrite certain arguments.
grid_priority = None
kwargs.pop("environment", None)
suite = test_suite
if grid_priority is not None and "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
combinations, self._combination_names = build_combos_with_names(
repo=repo,
combinations=combinations,
revisions=revisions,
search_revisions=search_revisions)
kwargs["combinations"] = combinations
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
if configs is not None:
for nick, config in configs.items():
self.add_config(nick, config)
if suite is not None:
self.add_suite(suite)
self._report_prefix = get_experiment_name()
def add_comparison_table_step(self, attributes=None):
revisions = self._combination_names
if len(revisions) != 2:
# TODO: Should generalize this by offering a general
# grouping function and then comparing any pair of
# settings in the same group.
raise NotImplementedError("need two revisions")
if attributes is None:
attributes = self.DEFAULT_TABLE_ATTRIBUTES
report = CompareRevisionsReport(*revisions, attributes=attributes)
self.add_report(report, outfile="%s-compare.html" % self._report_prefix)
def add_scatter_plot_step(self, attributes=None):
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
revisions = self._combination_names
if len(revisions) != 2:
# TODO: Should generalize this by offering a general
# grouping function and then comparing any pair of
# settings in the same group.
raise NotImplementedError("need two revisions")
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plots():
configs = [conf[0] for conf in self.configs]
for nick in configs:
config_before = "%s-%s" % (revisions[0], nick)
config_after = "%s-%s" % (revisions[1], nick)
for attribute in attributes:
name = "%s-%s-%s" % (self._report_prefix, attribute, nick)
report = ScatterPlotReport(
filter_config=[config_before, config_after],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 9,626 | 34.788104 | 80 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue684/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue684-base", "issue684-v1"]
CONFIGS = [
IssueConfig(
alias, [], driver_options=["--alias", alias, "--search-time-limit", "60s"])
for alias in [
"seq-sat-fd-autotune-1", "seq-sat-fd-autotune-2",
"seq-sat-fdss-1", "seq-sat-fdss-2", "seq-sat-fdss-2014",
"seq-sat-lama-2011", "lama-first"]
]
SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp()
| 2,500 | 37.476923 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue684/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
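# --- Illustrative sketch (not part of the original module) ---
# Minimal IssueConfig instances of the kind IssueExperiment expects; the
# option strings mirror the examples given in the class docstring below.
_example_configs = [
    IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
    IssueConfig(
        "lama", [],
        driver_options=["--alias", "seq-sat-lama-2011"]),
]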
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
revisions = revisions or []
configs = configs or []
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step(
'publish-absolute-report', subprocess.call, ['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step(
"publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step(step_name, make_scatter_plots))
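# --- Illustrative sketch (not part of the original module) ---
# The comparison and scatter steps above iterate over all revision pairs;
# with three hypothetical revisions this yields three pairs.
def _demo_revision_pairs():
    import itertools
    revs = ["issue684-base", "issue684-v1", "issue684-v2"]  # hypothetical
    for rev1, rev2 in itertools.combinations(revs, 2):
        print("%s vs %s" % (rev1, rev2))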
| 11,446 | 33.068452 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue684/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows how a specific attribute compares
between two configurations. The attribute value in config 1 is shown on
the x-axis and its relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
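# --- Illustrative sketch (not part of the original module) ---
# The coordinate transform applied in _fill_categories above, on made-up
# runtimes: config 1's value goes on the x-axis, the ratio val2/val1 on
# the y-axis, so y > 1 means config 2 did worse on that task.
def _demo_relative_coords():
    pairs = [(10.0, 5.0), (2.0, 8.0)]   # (val1, val2), hypothetical
    for val1, val2 in pairs:
        x, y = val1, val2 / float(val1)
        print("x=%s y=%s" % (x, y))     # -> x=10.0 y=0.5 and x=2.0 y=4.0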
| 3,921 | 35.654206 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue655/base.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']),
IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['ms_parser'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
#exp.add_comparison_table_step()
#if matplotlib:
#for attribute in ["memory", "total_time"]:
#for config in configs:
#exp.add_report(
#RelativeScatterPlotReport(
#attributes=[attribute],
#filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
#get_category=lambda run1, run2: run1.get("domain"),
#),
#outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
#)
exp()
main(revisions=['issue655-base'])
| 5,183 | 55.967033 | 280 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue655/base-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_optimal_strips()
# dummy configs with correct names so that comparison report works
configs = {
IssueConfig('rl-b50k', []),
IssueConfig('cggl-b50k', []),
IssueConfig('dfp-b50k', []),
IssueConfig('rl-ginf', []),
IssueConfig('cggl-ginf', []),
IssueConfig('dfp-ginf', []),
IssueConfig('rl-f50k', []),
IssueConfig('cggl-f50k', []),
IssueConfig('dfp-f50k', []),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['ms_parser'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
exp.add_fetcher('data/issue655-base-eval')
exp.add_fetcher('data/issue655-v1-eval')
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue655-base', 'issue655-v1'])
| 3,379 | 34.578947 | 107 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue655/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']),
IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']),
IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']),
IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']),
IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']),
IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py')
exp.add_command('ms-parser', ['ms_parser'])
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend(extra_attributes)
#exp.add_comparison_table_step()
#if matplotlib:
#for attribute in ["memory", "total_time"]:
#for config in configs:
#exp.add_report(
#RelativeScatterPlotReport(
#attributes=[attribute],
#filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
#get_category=lambda run1, run2: run1.get("domain"),
#),
#outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
#)
exp()
main(revisions=['issue655-v1'])
| 5,263 | 56.846154 | 293 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue655/suites.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import textwrap
HELP = "Convert suite name to list of domains or tasks."
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(set(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat()))
def suite_unsolvable():
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl() + suite_ipc14_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt() + suite_ipc14_opt_strips())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl() + suite_ipc14_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat() + suite_ipc14_sat_strips())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_ipc14() + suite_alternative_formulations())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("suite", help="suite name")
return parser.parse_args()
def main():
prefix = "suite_"
suite_names = [
name[len(prefix):] for name in sorted(globals().keys())
if name.startswith(prefix)]
parser = argparse.ArgumentParser(description=HELP)
parser.add_argument("suite", choices=suite_names, help="suite name")
parser.add_argument(
"--width", default=72, type=int,
help="output line width (default: %(default)s). Use 1 for single "
"column.")
args = parser.parse_args()
suite_func = globals()[prefix + args.suite]
print(textwrap.fill(
str(suite_func()),
width=args.width,
break_long_words=False,
break_on_hyphens=False))
if __name__ == "__main__":
main()
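# --- Illustrative usage sketch (not part of the original script) ---
# Command line, e.g.:
#   python suites.py optimal_strips
#   python suites.py ipc11 --width 1
# Programmatic use of the suite functions defined above:
def _demo_suites():
    print(suite_ipc06_strips())       # -> ['pipesworld-tankage', 'rovers', ...]
    print(len(suite_optimal_strips()))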
| 8,551 | 23.364672 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue655/ms-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('ms_final_size', r'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', r'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)
parser.add_pattern('ms_memory_delta', r'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int)
parser.add_pattern('actual_search_time', r'Actual search time: (.+)s \[t=.+s\]', required=False, type=float)
def check_ms_constructed(content, props):
ms_construction_time = props.get('ms_construction_time')
abstraction_constructed = False
if ms_construction_time is not None:
abstraction_constructed = True
props['ms_abstraction_constructed'] = abstraction_constructed
parser.add_function(check_ms_constructed)
def check_planner_exit_reason(content, props):
ms_abstraction_constructed = props.get('ms_abstraction_constructed')
error = props.get('error')
if error != 'none' and error != 'timeout' and error != 'out-of-memory':
print 'error: %s' % error
return
# Check whether merge-and-shrink computation or search ran out of
# time or memory.
ms_out_of_time = False
ms_out_of_memory = False
search_out_of_time = False
search_out_of_memory = False
if ms_abstraction_constructed is False:
if error == 'timeout':
ms_out_of_time = True
elif error == 'out-of-memory':
ms_out_of_memory = True
elif ms_abstraction_constructed is True:
if error == 'timeout':
search_out_of_time = True
elif error == 'out-of-memory':
search_out_of_memory = True
props['ms_out_of_time'] = ms_out_of_time
props['ms_out_of_memory'] = ms_out_of_memory
props['search_out_of_time'] = search_out_of_time
props['search_out_of_memory'] = search_out_of_memory
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
plan_length = props.get('plan_length')
expansions = props.get('expansions')
if plan_length is not None:
perfect_heuristic = False
if plan_length + 1 == expansions:
perfect_heuristic = True
props['perfect_heuristic'] = perfect_heuristic
parser.add_function(check_perfect_heuristic)
def check_proved_unsolvability(content, props):
proved_unsolvability = False
if props['coverage'] == 0:
for line in content.splitlines():
if line == 'Completely explored state space -- no solution!':
proved_unsolvability = True
break
props['proved_unsolvability'] = proved_unsolvability
parser.add_function(check_proved_unsolvability)
parser.parse()
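# --- Illustrative sketch (not part of the original parser) ---
# How one of the patterns above matches; the log line is made up but
# follows the format the pattern expects.
def _demo_pattern():
    import re
    line = "Done initializing merge-and-shrink heuristic [12.34s]"  # hypothetical
    match = re.search(
        r'Done initializing merge-and-shrink heuristic \[(.+)s\]', line)
    print(float(match.group(1)))      # -> 12.34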
| 2,784 | 37.150685 | 135 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue655/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, benchmarks_dir, suite, revisions=[], configs={},
grid_priority=None, path=None, test_suite=None,
email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used.
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(benchmarks_dir, suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,496 | 33.907821 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue655/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows how a specific attribute compares
between two configurations. The attribute value in config 1 is shown on
the x-axis and its relation to the value in config 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
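# --- Illustrative sketch (not part of the original module) ---
# The "center around 1" adjustment in _fill_categories above makes the
# y-axis symmetric on a log scale; with made-up limits:
def _demo_center_limits():
    ylim_bottom, ylim_top = 0.25, 2.0   # hypothetical observed ratios
    if ylim_bottom < 1:
        ylim_top = max(ylim_top, 1 / float(ylim_bottom))
    if ylim_top > 1:
        ylim_bottom = min(ylim_bottom, 1 / float(ylim_top))
    print("%s %s" % (ylim_bottom, ylim_top))  # -> 0.25 4.0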
| 3,921 | 35.654206 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue482/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (node.endswith("cluster.bc2.ch") or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
# TODO: Once we have reference results, we should add "quality".
# TODO: Add something about errors/exit codes.
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
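    # A typical instantiation combining the options documented above
    # (hypothetical revision names):
    #
    #     exp = IssueExperiment(
    #         configs={"lmcut": ["--search", "astar(lmcut())"]},
    #         suite=["grid", "gripper:prob01.pddl"],
    #         search_revisions=["issue123-base", "issue123"],
    #         test_suite=["depot:pfile1"])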
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" % (rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for attribute in valid_attributes:
name = "-".join([rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,741 | 35.614943 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue482/issue482.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_gapdb': [
'--search',
'astar(gapdb())'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue482-base", "issue482-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 396 | 16.26087 | 54 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue887/translator_additional_parser.py
|
#!/usr/bin/env python
import hashlib
from lab.parser import Parser
def add_hash_value(content, props):
props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest()
parser = Parser()
parser.add_function(add_hash_value, file="output.sas")
parser.parse()
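# The hash ends up as an ordinary run property, e.g. (illustrative value only):
#
#     "translator_output_sas_hash": "b109c5..."
#
# which lets reports such as TranslatorDiffReport in v1.py detect tasks whose
# translator output differs between revisions.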
| 273 | 20.076923 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue887/v1.py
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
from collections import defaultdict
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab import tools
from downward.reports.compare import ComparativeReport
from downward.reports import PlanningReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
EXPNAME = common_setup.get_experiment_name()
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue887-base", "issue887-v1"]
CONFIGS = [
IssueConfig(
"translate-only",
[],
driver_options=["--translate"])
]
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
# This was generated by running "./suites.py all" in the benchmarks
# repository.
SUITE = [
'agricola-opt18-strips',
'agricola-sat18-strips',
'airport',
'airport-adl',
'assembly',
'barman-mco14-strips',
'barman-opt11-strips',
'barman-opt14-strips',
'barman-sat11-strips',
'barman-sat14-strips',
'blocks',
'caldera-opt18-adl',
'caldera-sat18-adl',
'caldera-split-opt18-adl',
'caldera-split-sat18-adl',
'cavediving-14-adl',
'childsnack-opt14-strips',
'childsnack-sat14-strips',
'citycar-opt14-adl',
'citycar-sat14-adl',
'data-network-opt18-strips',
'data-network-sat18-strips',
'depot',
'driverlog',
'elevators-opt08-strips',
'elevators-opt11-strips',
'elevators-sat08-strips',
'elevators-sat11-strips',
'flashfill-sat18-adl',
'floortile-opt11-strips',
'floortile-opt14-strips',
'floortile-sat11-strips',
'floortile-sat14-strips',
'freecell',
'ged-opt14-strips',
'ged-sat14-strips',
'grid',
'gripper',
'hiking-agl14-strips',
'hiking-opt14-strips',
'hiking-sat14-strips',
'logistics00',
'logistics98',
'maintenance-opt14-adl',
'maintenance-sat14-adl',
'miconic',
'miconic-fulladl',
'miconic-simpleadl',
'movie',
'mprime',
'mystery',
'no-mprime',
'no-mystery',
'nomystery-opt11-strips',
'nomystery-sat11-strips',
'nurikabe-opt18-adl',
'nurikabe-sat18-adl',
'openstacks',
'openstacks-agl14-strips',
'openstacks-opt08-adl',
'openstacks-opt08-strips',
'openstacks-opt11-strips',
'openstacks-opt14-strips',
'openstacks-sat08-adl',
'openstacks-sat08-strips',
'openstacks-sat11-strips',
'openstacks-sat14-strips',
'openstacks-strips',
'optical-telegraphs',
'organic-synthesis-opt18-strips',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-opt18-strips',
'organic-synthesis-split-sat18-strips',
'parcprinter-08-strips',
'parcprinter-opt11-strips',
'parcprinter-sat11-strips',
'parking-opt11-strips',
'parking-opt14-strips',
'parking-sat11-strips',
'parking-sat14-strips',
'pathways',
'pathways-noneg',
'pegsol-08-strips',
'pegsol-opt11-strips',
'pegsol-sat11-strips',
'petri-net-alignment-opt18-strips',
'philosophers',
'pipesworld-notankage',
'pipesworld-tankage',
'psr-large',
'psr-middle',
'psr-small',
'rovers',
'satellite',
'scanalyzer-08-strips',
'scanalyzer-opt11-strips',
'scanalyzer-sat11-strips',
'schedule',
'settlers-opt18-adl',
'settlers-sat18-adl',
'snake-opt18-strips',
'snake-sat18-strips',
'sokoban-opt08-strips',
'sokoban-opt11-strips',
'sokoban-sat08-strips',
'sokoban-sat11-strips',
'spider-opt18-strips',
'spider-sat18-strips',
'storage',
'termes-opt18-strips',
'termes-sat18-strips',
'tetris-opt14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'thoughtful-sat14-strips',
'tidybot-opt11-strips',
'tidybot-opt14-strips',
'tidybot-sat11-strips',
'tpp',
'transport-opt08-strips',
'transport-opt11-strips',
'transport-opt14-strips',
'transport-sat08-strips',
'transport-sat11-strips',
'transport-sat14-strips',
'trucks',
'trucks-strips',
'visitall-opt11-strips',
'visitall-opt14-strips',
'visitall-sat11-strips',
'visitall-sat14-strips',
'woodworking-opt08-strips',
'woodworking-opt11-strips',
'woodworking-sat08-strips',
'woodworking-sat11-strips',
'zenotravel',
]
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser("translator_additional_parser.py")
del exp.commands['remove-output-sas']
class TranslatorDiffReport(PlanningReport):
def get_cell(self, run):
return ";".join(run.get(attr) for attr in self.attributes)
def get_text(self):
lines = []
for runs in self.problem_runs.values():
hashes = set([r.get("translator_output_sas_hash") for r in runs])
if len(hashes) > 1 or None in hashes:
lines.append(";".join([self.get_cell(r) for r in runs]))
return "\n".join(lines)
class SameValueFilters(object):
"""Ignore runs for a task where all algorithms have the same value."""
def __init__(self, attribute):
self._attribute = attribute
self._tasks_to_values = defaultdict(list)
def _get_task(self, run):
return (run['domain'], run['problem'])
def store_values(self, run):
value = run.get(self._attribute)
self._tasks_to_values[self._get_task(run)].append(value)
        # Don't filter this run yet.
return True
def filter_tasks_with_equal_values(self, run):
values = self._tasks_to_values[self._get_task(run)]
return len(set(values)) != 1
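# Note: this two-pass scheme relies on the report applying its filters in
# order over the full set of runs, so store_values has seen every run before
# filter_tasks_with_equal_values drops anything (see the "filtered"
# comparison step below).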
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_parse_again_step()
exp.add_fetcher(name='fetch')
ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"]
# exp.add_absolute_report_step(
# outfile=os.path.join(exp.eval_dir, "{EXPNAME}.html".format(**locals())),
# attributes=ATTRIBUTES)
exp.add_comparison_table_step(
attributes=ATTRIBUTES)
same_value_filters = SameValueFilters("translator_output_sas_hash")
# exp.add_absolute_report_step(
# outfile=os.path.join(exp.eval_dir, "{EXPNAME}-filtered.html".format(**locals())),
# attributes=ATTRIBUTES,
# filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values])
exp.add_comparison_table_step(
suffix="filtered",
attributes=ATTRIBUTES,
filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values])
exp.add_report(
    TranslatorDiffReport(
        attributes=["domain", "problem", "algorithm", "run_dir"]),
    outfile="different_output_sas.csv")
exp.run_steps()
| 7,093 | 27.376 | 97 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue887/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        # *revisions* may be None when algorithms are added manually.
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, outfile=None, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = outfile or os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-' + os.path.basename(outfile), subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
Use *suffix* to denote a step name and filename suffix if you
want to add multiple different comparison table steps.
All *kwargs* except *suffix* will be passed to the
CompareConfigsReport class. If the keyword argument
*attributes* is not specified, a default list of attributes is
used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
suffix = kwargs.pop("suffix", "")
if suffix:
suffix = "-" + suffix
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare%s.%s" % (
self.name, rev1, rev2, suffix, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare%s.html" % (self.name, rev1, rev2, suffix))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables%s" % suffix, make_comparison_tables)
self.add_step(
"publish-comparison-tables%s" % suffix, publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
        configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 15,119 | 36.61194 | 90 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue887/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
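# Worked example of the relative computation in _fill_categories (hypothetical
# numbers): with total_time 8.0 s for algorithm 1 and 2.0 s for algorithm 2,
# the point is plotted at x = 8.0 and y = 2.0 / 8.0 = 0.25, so a 4x speedup
# appears as 0.25 on the log-scaled y-axis; the y-limits are then widened to
# reciprocal values so that 1 stays centered.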
| 3,875 | 35.566038 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue629-v2-base', 'issue629-v2'])
| 1,679 | 30.698113 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue629-v2-base', 'issue629-v4'])
| 1,679 | 30.698113 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/v7-9.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for r1, r2 in zip(revisions, revisions[1:]) + [(revisions[0], revisions[-1])]:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in [r1, r2]],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, r1, r2)
)
exp()
main(revisions=['issue629-v7-base', 'issue629-v7', 'issue629-v8', 'issue629-v9'])
| 1,849 | 33.259259 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue629-base', 'issue629-v1'])
| 1,676 | 30.641509 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/suites.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import textwrap
HELP = "Convert suite name to list of domains or tasks."
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(set(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat()))
def suite_unsolvable():
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl() + suite_ipc14_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt() + suite_ipc14_opt_strips())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl() + suite_ipc14_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat() + suite_ipc14_sat_strips())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_ipc14() + suite_alternative_formulations())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("suite", help="suite name")
return parser.parse_args()
def main():
prefix = "suite_"
suite_names = [
name[len(prefix):] for name in sorted(globals().keys())
if name.startswith(prefix)]
parser = argparse.ArgumentParser(description=HELP)
parser.add_argument("suite", choices=suite_names, help="suite name")
parser.add_argument(
"--width", default=72, type=int,
help="output line width (default: %(default)s). Use 1 for single "
"column.")
args = parser.parse_args()
suite_func = globals()[prefix + args.suite]
print(textwrap.fill(
str(suite_func()),
width=args.width,
break_long_words=False,
break_on_hyphens=False))
if __name__ == "__main__":
main()
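# Example invocation (from this directory):
#
#     ./suites.py optimal_strips --width 1
#
# prints the domains of suite_optimal_strips(), one per line.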
| 8,551 | 23.364672 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, benchmarks_dir, suite, revisions=[], configs={},
grid_priority=None, path=None, test_suite=None,
email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(benchmarks_dir, suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
        configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,496 | 33.907821 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/experimental-branches.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=[
'airport',
'depot',
'driverlog',
'elevators-opt08-strips',
'elevators-opt11-strips',
'freecell',
'hiking-opt14-strips',
'pipesworld-tankage',
]
configs = {
IssueConfig(
'astar-blind-ssec',
['--search', 'astar(blind(), pruning=stubborn_sets_ec())']
),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
exp()
# issue629-experimental-base is based on issue629-v2-base and only removed the ordering of actions after pruning
# issue629-experimental is based on issue629-v4 and only removed the ordering of actions after pruning
# Both branches will not be merged.
main(revisions=['issue629-experimental-base', 'issue629-experimental'])
| 1,276 | 25.604167 | 113 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/v10.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for r1, r2 in zip(revisions, revisions[1:]) + [(revisions[0], revisions[-1])]:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in [r1, r2]],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, r1, r2)
)
exp()
main(revisions=['issue629-v9', 'issue629-v10'])
| 1,815 | 32.62963 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue629-v2-base', 'issue629-v3'])
| 1,679 | 30.698113 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
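        # (Illustration: with ylim_top = 32, tick_step = 32**0.2 = 2, so the
        # ticks are 1/32, 1/16, ..., 1, 2, ..., 32 -- symmetric around 1 on
        # the log scale.)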
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute differs
    between two configurations. The attribute value of config 1 is shown
    on the x-axis and the ratio of config 2's value to it on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
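# Usage sketch (algorithm names are illustrative; mirrors the add_report
# calls in the experiment scripts of this directory):
#
#   exp.add_report(
#       RelativeScatterPlotReport(
#           attributes=["total_time"],
#           filter_config=["issue629-v9-astar-blind", "issue629-v10-astar-blind"],
#           get_category=lambda run1, run2: run1.get("domain")),
#       outfile="relative-total_time.png")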
| 3,921 | 35.654206 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue629/v5-v6.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='[email protected]',
)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue629-v5', 'issue629-v6'])
| 1,674 | 30.603774 | 103 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue698/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue698-base", "issue698-v1"]
CONFIGS = [
IssueConfig(
"blind",
["--search", "astar(blind())"],
driver_options=["--search-time-limit", "60s"]
)
]
sys.path.append(BENCHMARKS_DIR)
import suites
SUITE = suites.suite_optimal_strips()
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command("parser", ["custom-parser.py"])
exp.add_comparison_table_step(
attributes=exp.DEFAULT_TABLE_ATTRIBUTES +
["successor_generator_time", "reopened_until_last_jump"])
exp.add_scatter_plot_step(attributes=["successor_generator_time"])
exp()
| 1,138 | 23.76087 | 72 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue698/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
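# Example (illustrative paths): for a script at /repo/experiments/issue698/v1.py
# the loop above visits /repo/experiments/issue698, /repo/experiments and /repo,
# returning /repo as soon as /repo/.hg exists.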
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        for rev in revisions or []:  # tolerate the documented "omit both" case
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
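    # Example: a portfolio nick such as "seq-opt-fdss-1" (any nick containing
    # "fdss") keeps only the PORTFOLIO_ATTRIBUTES above; every other nick
    # keeps the full attribute list. The nick is illustrative.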
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step(
'publish-absolute-report', subprocess.call, ['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step(
"publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
        configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step(step_name, make_scatter_plots))
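# Usage sketch (attribute names for illustration): absolute and relative
# plots can be requested side by side --
#
#   exp.add_scatter_plot_step(attributes=["expansions"])
#   exp.add_scatter_plot_step(relative=True, attributes=["total_time"])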
| 11,446 | 33.068452 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue698/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute differs
    between two configurations. The attribute value of config 1 is shown
    on the x-axis and the ratio of config 2's value to it on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 3,921 | 35.654206 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue698/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
class CustomParser(Parser):
def __init__(self):
Parser.__init__(self)
self.add_pattern(
"successor_generator_time",
"Building successor generator...done! \[t=(.+)s\]",
required=False,
type=float)
if __name__ == "__main__":
parser = CustomParser()
print "Running custom parser"
parser.parse()
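# Illustrative log line the pattern above is meant to match (the timing
# value is made up):
#
#   Building successor generator...done! [t=0.0153s]
#
# which would store successor_generator_time = 0.0153 for the run.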
| 430 | 20.55 | 63 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue443/issue443-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_lmcount': [
'--search',
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue443-base", "issue443-v2"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 470 | 19.478261 | 102 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue443/issue443-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_lmcount': [
'--search',
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue443-base", "issue443-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 470 | 19.478261 | 102 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue443/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (node.endswith("cluster.bc2.ch") or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
# TODO: Once we have reference results, we should add "quality".
# TODO: Add something about errors/exit codes.
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-compare.html" % (rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for attribute in valid_attributes:
name = "-".join([rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,741 | 35.614943 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue443/issue443-v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_lmcount': [
'--search',
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue443-base", "issue443-v3"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 470 | 19.478261 | 102 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue443/issue443-v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_lmcount': [
'--search',
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue443-v4-base", "issue443-v4"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 473 | 19.608696 | 102 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue650/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
import suites
configs = [
IssueConfig(
"cegar-landmarks-10k",
["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]),
IssueConfig(
"cegar-landmarks-goals-900s-debug",
["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"],
build_options=["--debug"],
driver_options=["--debug"],
),
]
revisions = ["issue650-base", "issue650-v2"]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="[email protected]",
)
exp.add_comparison_table_step()
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
| 1,238 | 27.159091 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue650/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
import suites
configs = [
IssueConfig(
"cegar-landmarks-10k",
["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]),
IssueConfig(
"cegar-landmarks-goals-900s",
["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"]),
]
revisions = ["issue650-base", "issue650-v1"]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="[email protected]",
)
exp.add_comparison_table_step()
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
| 1,151 | 27.097561 | 86 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue650/suites.py
|
# Benchmark suites from the Fast Downward benchmark collection.
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-agl14-adl',
'citycar-agl14-adl',
'maintenance-agl14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-agl14-strips',
'childsnack-agl14-strips',
'floortile-agl14-strips',
'ged-agl14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-agl14-strips',
'tetris-agl14-strips',
'thoughtful-agl14-strips',
'transport-agl14-strips',
'visitall-agl14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-mco14-adl',
'citycar-mco14-adl',
'maintenance-mco14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-mco14-strips',
'floortile-mco14-strips',
'ged-mco14-strips',
'hiking-mco14-strips',
'openstacks-mco14-strips',
'parking-mco14-strips',
'tetris-mco14-strips',
'thoughtful-mco14-strips',
'transport-mco14-strips',
'visitall-mco14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-opt14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-sat14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat())
def suite_unsolvable():
# TODO: Add other unsolvable problems (Miconic-FullADL).
# TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl'
# if the extra-domains branch is merged.
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_alternative_formulations())
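# Example composition (hypothetical): the helpers above combine freely, e.g.
#
#   SUITE = sorted(set(suite_ipc08_opt_strips() + suite_ipc11_opt()))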
| 7,695 | 23.35443 | 77 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue650/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
Usually a relative directory (depends on how it was called by the user.)"""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return (
"cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, suite, revisions=[], configs={}, grid_priority=None,
path=None, test_suite=None, email=None, processes=None,
**kwargs):
"""
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
*configs* must be a non-empty list of IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(..., suite=suites.suite_all())
IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(..., suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(..., grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"])
If *email* is specified, it should be an email address. This
email address will be notified upon completion of the experiments
if it is run on the cluster.
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(
priority=grid_priority, email=email)
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
repo = get_repo_base()
for rev in revisions:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
repo,
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self.add_suite(os.path.join(repo, "benchmarks"), suite)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(self.eval_dir,
get_experiment_name() + "." +
report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(Step('publish-absolute-report',
subprocess.call,
['publish', outfile]))
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = CompareConfigsReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ "." + report.output_format)
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare" % (self.name, rev1, rev2)
+ ".html")
subprocess.call(['publish', outfile])
self.add_step(Step("make-comparison-tables", make_comparison_tables))
self.add_step(Step("publish-comparison-tables", publish_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
        configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,481 | 33.963585 | 83 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue650/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
    Generate a scatter plot that shows how a specific attribute differs
    between two configurations. The attribute value of config 1 is shown
    on the x-axis and the ratio of config 2's value to it on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['config'] == self.configs[0] and
run2['config'] == self.configs[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.configs[0], val1)
assert val2 > 0, (domain, problem, self.configs[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlots use log-scaling on the x-axis by default.
default_xscale = 'log'
if self.attribute and self.attribute in self.LINEAR:
default_xscale = 'linear'
PlotReport._set_scales(self, xscale or default_xscale, 'log')
| 3,921 | 35.654206 | 84 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-base-v2"), ("issue571-base", "issue571-base", "issue571-v2")])
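# Note (assumption): each triple is read as (translator revision,
# preprocessor revision, search revision); the exact semantics are defined by
# the "combinations" parameter of issue571's common_setup.IssueExperiment.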
| 198 | 27.428571 | 126 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/main.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
import common_setup
def main(revisions=None, combinations=None):
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
B_CONFIGS = {
'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
G_CONFIGS = {
'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
F_CONFIGS = {
'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
}
CONFIGS = dict(B_CONFIGS)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)
exp = common_setup.IssueExperiment(
search_revisions=revisions,
combinations=combinations,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
exp.add_search_parser('ms-parser.py')
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class attribute
attributes.extend(extra_attributes)
exp.add_comparison_table_step(attributes=attributes)
exp()
| 4,450 | 55.341772 | 273 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-v3"), ("issue571-base", "issue571-base", "issue571-v4")])
| 193 | 26.714286 | 121 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(revisions=["issue571-base", "issue571-v1"])
| 120 | 16.285714 | 48 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/ms-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('initial_h_value', r'initial h value: (\d+)', required=False, type=int)
parser.add_pattern('ms_final_size', r'Final transition system size: (\d+)', required=False, type=int)
parser.add_pattern('ms_construction_time', r'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float)
def check_ms_constructed(content, props):
ms_construction_time = props.get('ms_construction_time')
abstraction_constructed = False
if ms_construction_time is not None:
abstraction_constructed = True
props['ms_abstraction_constructed'] = abstraction_constructed
parser.add_function(check_ms_constructed)
def check_proved_unsolvability(content, props):
proved_unsolvability = False
if props['coverage'] == 0:
for line in content.splitlines():
if line == 'Completely explored state space -- no solution!':
proved_unsolvability = True
break
props['proved_unsolvability'] = proved_unsolvability
parser.add_function(check_proved_unsolvability)
def check_planner_exit_reason(content, props):
ms_abstraction_constructed = props.get('ms_abstraction_constructed')
error = props.get('error')
    if error not in ('none', 'timeout', 'out-of-memory'):
        print 'error: %s' % error
return
# Check whether merge-and-shrink computation or search ran out of
# time or memory.
ms_out_of_time = False
ms_out_of_memory = False
search_out_of_time = False
search_out_of_memory = False
if ms_abstraction_constructed == False:
if error == 'timeout':
ms_out_of_time = True
elif error == 'out-of-memory':
ms_out_of_memory = True
elif ms_abstraction_constructed == True:
if error == 'timeout':
search_out_of_time = True
elif error == 'out-of-memory':
search_out_of_memory = True
props['ms_out_of_time'] = ms_out_of_time
props['ms_out_of_memory'] = ms_out_of_memory
props['search_out_of_time'] = search_out_of_time
props['search_out_of_memory'] = search_out_of_memory
# Compute actual search time
if ms_abstraction_constructed == True and props.get('search_time') is not None:
difference = props.get('search_time') - props.get('ms_construction_time')
props['actual_search_time'] = difference
parser.add_function(check_planner_exit_reason)
def check_perfect_heuristic(content, props):
plan_length = props.get('plan_length')
expansions = props.get('expansions')
    if plan_length is not None:
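        # The check below treats the heuristic as perfect if the search
        # expanded only the states along the plan (plan_length + 1 states,
        # including the initial state).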
perfect_heuristic = False
if plan_length + 1 == expansions:
perfect_heuristic = True
props['perfect_heuristic'] = perfect_heuristic
parser.add_function(check_perfect_heuristic)
parser.parse()
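# For reference, the patterns above match log lines of the following shape
# (examples reconstructed from the regexes, not taken from an actual run):
#   initial h value: 7
#   Final transition system size: 12345
#   Done initializing merge-and-shrink heuristic [12.34s]
#   Completely explored state space -- no solution!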
| 2,879 | 36.402597 | 128 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, email=None, processes=1,
combinations=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment(processes=processes)
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority,
email=email)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, combinations, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
if combinations:
kwargs["combinations"].extend([
(Translator(repo, comb[0]),
Preprocessor(repo, comb[1]),
Planner(repo, comb[2]))
for comb in combinations])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-%s-compare.html" %
(self.name, rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in self.get_supported_attributes(
config_nick, attributes):
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 13,259 | 35.13079 | 88 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-v2"), ("issue571-base", "issue571-base", "issue571-v3")])
| 193 | 26.714286 | 121 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v6.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-v5"), ("issue571-base", "issue571-base", "issue571-v6")])
| 193 | 26.714286 | 121 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v5.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-v4"), ("issue571-base", "issue571-base", "issue571-v5")])
| 193 | 26.714286 | 121 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/compare-base-base-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareConfigsReport
import common_setup
REVS = ["issue571-base", "issue571-base-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
B_CONFIGS = {
'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
G_CONFIGS = {
'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'],
}
F_CONFIGS = {
'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'],
}
CONFIGS = dict(B_CONFIGS)
CONFIGS.update(G_CONFIGS)
CONFIGS.update(F_CONFIGS)
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
test_suite=['depot:pfile1'],
processes=4,
email='[email protected]',
)
# planner outcome attributes
perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False)
actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm])
# m&s attributes
ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm])
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True)
ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
extra_attributes = [
perfect_heuristic,
proved_unsolvability,
actual_search_time,
ms_construction_time,
ms_abstraction_constructed,
ms_final_size,
ms_out_of_memory,
ms_out_of_time,
search_out_of_memory,
search_out_of_time,
]
attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES)  # copy to avoid mutating the class attribute
attributes.extend(extra_attributes)
def filter_base1(run):
config = run['config']
if config.startswith('issue571-base-'):
return True
return False
def filter_base2(run):
config = run['config']
if config.startswith('issue571-base-issue571-base-issue571-base-v2-'):
return True
return False
exp.add_fetcher('data/issue571-v1-eval', filter=filter_base1)
exp.add_fetcher('data/issue571-v2-eval', filter=filter_base2)
# TODO: the following does not work, presumably because issue571-base
# is not represented as issue571-base-issue571-base-issue571-base
#exp.add_comparison_table_step(attributes=attributes)
compared_configs = [
    ('issue571-base-%s' % conf,
     'issue571-base-issue571-base-issue571-base-v2-%s' % conf)
    for conf in CONFIGS]
exp.add_report(CompareConfigsReport(attributes=attributes,
compared_configs=compared_configs))
exp()
| 5,254 | 50.519608 | 273 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v8.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-base-v2"), ("issue571-base", "issue571-base", "issue571-v7"), ("issue571-base", "issue571-base", "issue571-v8")])
| 249 | 34.714286 | 177 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue571/v7.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(combinations=[("issue571-base", "issue571-base", "issue571-v6"), ("issue571-base", "issue571-base", "issue571-v7")])
| 193 | 26.714286 | 121 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue544/sat-v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue544-base-v2", "issue544-v2", "issue544-v3"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 541 | 17.689655 | 57 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue544/sat-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue544-base", "issue544-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"eager_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"],
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"],
"lazy_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy(h, preferred=h)"],
"lazy_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 904 | 19.568182 | 45 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue544/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"plan_length",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
        runs. By default the first gripper task is used. ::
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-%s-compare.html" %
(self.name, rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def is_portfolio(config_nick):
return "fdss" in config_nick
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
if is_portfolio(config_nick):
valid_attributes = [
attr for attr in attributes
if attr in self.PORTFOLIO_ATTRIBUTES]
else:
valid_attributes = attributes
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in valid_attributes:
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,755 | 35.135977 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue544/sat-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue544-base-v2", "issue544-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 526 | 17.172414 | 45 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue544/regression-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Before you can run the experiment you need to create duplicates of the
two tasks we want to test:
cd ../benchmarks/trucks-strips
for i in {00..19}; do cp p16.pddl p16-$i.pddl; done
for i in {00..19}; do cp domain_p16.pddl domain_p16-$i.pddl; done
cd ../freecell
for i in {00..19}; do cp probfreecell-11-5.pddl probfreecell-11-5-$i.pddl; done
Don't forget to remove the duplicate tasks afterwards. Otherwise they
will be included in subsequent experiments.
"""
import common_setup
REVS = ["issue544-base", "issue544-v1"]
LIMITS = {"search_time": 1800}
CONFIGS = {
"eager_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"eager_greedy(h, preferred=h)"],
"eager_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"eager_greedy(h, preferred=h)"],
"lazy_greedy_add": [
"--heuristic",
"h=add()",
"--search",
"lazy_greedy(h, preferred=h)"],
"lazy_greedy_ff": [
"--heuristic",
"h=ff()",
"--search",
"lazy_greedy(h, preferred=h)"],
}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = (["trucks-strips:p16-%02d.pddl" % i for i in range(20)] +
["freecell:probfreecell-11-5-%02d.pddl" % i for i in range(20)])
PRIORITY = 0 # number means maia experiment
exp = common_setup.IssueExperiment(
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 1,616 | 23.134328 | 79 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue880/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-v1", "issue880-v2"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick + "-" + max_transitions_nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for max_transitions_nick, max_transitions in [("1M", 1000000)]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]),
#("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
"depot:p01.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
REFINEMENT_ATTRIBUTES +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,707 | 32.02439 | 134 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue880/parser.py
|
#! /usr/bin/env python
import logging
import re
from lab.parser import Parser
class CommonParser(Parser):
def add_difference(self, diff, val1, val2):
def diff_func(content, props):
if props.get(val1) is None or props.get(val2) is None:
diff_val = None
else:
diff_val = props.get(val1) - props.get(val2)
props[diff] = diff_val
self.add_function(diff_func)
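    # Example (attribute names here are illustrative): calling
    #     self.add_difference("abstraction_time", "init_time", "search_start_time")
    # stores init_time - search_start_time under "abstraction_time", or None
    # if either operand is missing.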
def _get_flags(self, flags_string):
flags = 0
for char in flags_string:
flags |= getattr(re, char)
return flags
def add_repeated_pattern(
self, name, regex, file="run.log", required=False, type=int,
flags=""):
        def find_all_occurrences(content, props):
            matches = re.findall(regex, content, flags=self._get_flags(flags))
            if required and not matches:
                logging.error("Pattern {0} not found in file {1}".format(regex, file))
            props[name] = [type(m) for m in matches]
        self.add_function(find_all_occurrences, file=file)
def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""):
Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags)
def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""):
def search_from_bottom(content, props):
reversed_content = "\n".join(reversed(content.splitlines()))
match = re.search(regex, reversed_content, flags=self._get_flags(flags))
if required and not match:
logging.error("Pattern {0} not found in file {1}".format(regex, file))
if match:
props[name] = type(match.group(1))
self.add_function(search_from_bottom, file=file)
def no_search(content, props):
if "search_start_time" not in props:
error = props.get("error")
if error is not None and error != "incomplete-search-found-no-plan":
props["error"] = "no-search-due-to-" + error
REFINEMENT_ATTRIBUTES = [
("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"),
("time_for_finding_flaws", r"Time for finding flaws: (.+)s"),
("time_for_splitting_states", r"Time for splitting states: (.+)s"),
]
def compute_total_times(content, props):
for attribute, pattern in REFINEMENT_ATTRIBUTES:
props["total_" + attribute] = sum(props[attribute])
def add_time_analysis(content, props):
init_time = props.get("init_time")
if not init_time:
return
parts = []
parts.append("{init_time:.2f}:".format(**props))
for attribute, pattern in REFINEMENT_ATTRIBUTES:
time = props["total_" + attribute]
relative_time = time / init_time
print time, type(time)
parts.append("{:.2f} ({:.2f})".format(time, relative_time))
props["time_analysis"] = " ".join(parts)
def main():
parser = CommonParser()
parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float)
parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int)
parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float)
parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int)
for attribute, pattern in REFINEMENT_ATTRIBUTES:
parser.add_repeated_pattern(attribute, pattern, type=float, required=False)
parser.add_function(no_search)
parser.add_function(compute_total_times)
parser.add_function(add_time_analysis)
parser.parse()
if __name__ == "__main__":
main()
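# Self-contained sketch of the bottom-up search implemented in
# add_bottom_up_pattern above (illustrative only, not part of the parser):
#     import re
#     log = "t=0.1s, 10 KB\nt=2.5s, 20 KB"
#     reversed_log = "\n".join(reversed(log.splitlines()))
#     match = re.search(r"t=(.+)s,", reversed_log)
#     assert float(match.group(1)) == 2.5  # the last occurrence wins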
| 3,743 | 34.657143 | 109 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue880/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-base", "issue880-v1"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick + "-" + max_transitions_nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for max_transitions_nick, max_transitions in [("1M", 1000000), ("10M", 10000000)]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]),
("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
#"depot:p02.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
REFINEMENT_ATTRIBUTES +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump", "total_time_for_splitting_states", "total_time_for_finding_traces"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,796 | 33.109756 | 134 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue880/v2-900s.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-base", "issue880-v2"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for nick, config in [
("cegar-original-900s", ["--search", "astar(cegar(subtasks=[original()], max_transitions=infinity, max_time=900))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
"depot:p01.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
REFINEMENT_ATTRIBUTES +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,503 | 30.3 | 144 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue880/v2-max-transitions.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-base", "issue880-v2"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
"{nick}-{million_transitions}M".format(**locals()),
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for million_transitions in [1, 2, 5, 10]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(max_transitions=million_transitions * 10**6)]),
("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(max_transitions=million_transitions * 10**6)]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
"depot:p01.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,741 | 32.851852 | 167 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue880/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        # Guard against None so that omitting both arguments works as
        # documented above.
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
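        # Portfolio configs are recognised purely by their nick, e.g. a nick
        # such as "seq-sat-fdss-1" (illustrative) would count as a portfolio.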
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,786 | 36.435443 | 82 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue880/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            # Raising a bare string is not valid Python; use a real exception.
            raise ValueError("output format 'tex' is not supported")
        self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if not val1 or not val2:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
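                # Example: val1 = 10 and val2 = 5 give the point (10, 0.5),
                # i.e. algorithm 2 needed half the value of algorithm 1.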
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,867 | 35.490566 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue778/v1-no-min-ratio.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue778-base", "issue778-v1"]
CONFIGS = [
IssueConfig(
heuristic + "-" + pruning,
["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}())".format(**locals())])
for heuristic in ["blind", "lmcut"]
for pruning in ["ec", "simple"]
]
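# The comprehension above yields four algorithms: blind-ec, blind-simple,
# lmcut-ec and lmcut-simple, each running astar with the matching pruning.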
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attribute in ["memory", "total_time"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
)
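        # Example outfile (assuming exp.name == "issue778-v1-no-min-ratio"):
        # "issue778-v1-no-min-ratio-memory-blind-ec-issue778-base-issue778-v1.png"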
exp.run_steps()
| 1,869 | 30.694915 | 99 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue778/v1-opt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue778-base", "issue778-v1"]
CONFIGS = [
    IssueConfig('lmcut-sss-ratio-0.2', ['--search', 'astar(lmcut(), pruning=stubborn_sets_simple(min_required_pruning_ratio=0.2))']),
    # The "ssec" nick pairs with stubborn_sets_ec; the original line repeated
    # stubborn_sets_simple, which looks like a copy-paste slip.
    IssueConfig('lmcut-ssec-ratio-0.2', ['--search', 'astar(lmcut(), pruning=stubborn_sets_ec(min_required_pruning_ratio=0.2))']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="[email protected]",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER)
exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER)
#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER)
exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER)
#exp.add_absolute_report_step()
exp.add_comparison_table_step()
for attribute in ["memory", "total_time"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)
)
exp.run_steps()
| 1,910 | 33.125 | 134 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue778/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'pipesworld-notankage',
'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
'tetris-opt14-strips', 'tidybot-opt11-strips',
'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
'transport-opt11-strips', 'transport-opt14-strips',
'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
'woodworking-opt08-strips', 'woodworking-opt11-strips',
'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'airport', 'assembly', 'barman-sat11-strips',
'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
'mystery', 'nomystery-sat11-strips', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
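# Example (component options are illustrative):
#   IssueConfig("lmcut", ["--search", "astar(lmcut())"])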
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        # Guard against None so that omitting both arguments works as
        # documented above.
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
        All *kwargs* will be passed to the ComparativeReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,153 | 35.955614 | 82 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue778/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
for i in xrange(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
        if self.output_format == 'tex':
            # Raising a bare string is not valid Python; use a real exception.
            raise ValueError("output format 'tex' is not supported")
        self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if val1 is None or val2 is None:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
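        # Example: if the smallest ratio seen is 0.5, ylim_top is raised to at
        # least 2, keeping the y-axis symmetric around 1 on the log scale.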
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,875 | 35.566038 | 78 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue436/opt-v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import configs
import common_setup
REVS = ["issue436-base", "issue436-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
configs_optimal_core = configs.configs_optimal_core()
CONFIGS = {}
for name in ['astar_merge_and_shrink_greedy_bisim', 'astar_merge_and_shrink_dfp_bisim',
'astar_ipdb', 'astar_hmax', 'astar_blind', 'astar_lmcut',
'astar_merge_and_shrink_bisim', 'astar_lmcount_lm_merged_rhw_hm']:
CONFIGS[name] = configs_optimal_core[name]
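# CONFIGS now maps each nick listed above to the matching option list taken
# from configs.configs_optimal_core().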
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step()
exp()
| 808 | 22.794118 | 87 |
py
|
DAAISy
|
DAAISy-main/dependencies/FD/experiments/issue436/sat-v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import configs
import common_setup
REVS = ["issue436-base", "issue436-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_satisficing_with_ipc11()
default_configs_satisficing = configs.default_configs_satisficing(extended=True)
CONFIGS = {}
for name in ['lazy_greedy_add', 'eager_greedy_ff', 'eager_greedy_add', 'lazy_greedy_ff', 'pareto_ff']:
CONFIGS[name] = default_configs_satisficing[name]
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp()
| 686 | 21.16129 | 102 |
py
|